2024-12-07 00:22:36,967 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-07 00:22:37,003 main DEBUG Took 0.032968 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-12-07 00:22:37,005 main DEBUG PluginManager 'Core' found 129 plugins 2024-12-07 00:22:37,006 main DEBUG PluginManager 'Level' found 0 plugins 2024-12-07 00:22:37,007 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-12-07 00:22:37,010 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,022 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-12-07 00:22:37,052 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,054 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,054 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,055 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,055 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,056 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,057 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,057 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,058 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,058 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,059 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,060 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,060 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,061 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-12-07 00:22:37,061 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,069 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,070 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,070 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,071 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,071 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,072 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,073 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,076 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,079 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-12-07 00:22:37,080 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,080 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-12-07 00:22:37,082 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-12-07 00:22:37,084 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-12-07 00:22:37,086 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-12-07 00:22:37,086 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-12-07 00:22:37,088 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-12-07 00:22:37,088 main DEBUG PluginManager 'Converter' found 47 plugins 2024-12-07 00:22:37,101 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-12-07 00:22:37,108 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-12-07 00:22:37,112 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-12-07 00:22:37,113 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-12-07 00:22:37,113 main DEBUG createAppenders(={Console}) 2024-12-07 00:22:37,114 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 initialized 2024-12-07 00:22:37,115 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-12-07 00:22:37,116 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 OK. 2024-12-07 00:22:37,117 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-12-07 00:22:37,118 main DEBUG OutputStream closed 2024-12-07 00:22:37,119 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-12-07 00:22:37,120 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-12-07 00:22:37,121 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@57cf54e1 OK 2024-12-07 00:22:37,267 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-12-07 00:22:37,269 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-12-07 00:22:37,271 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-12-07 00:22:37,272 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-12-07 00:22:37,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-12-07 00:22:37,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-12-07 00:22:37,273 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-12-07 00:22:37,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-12-07 00:22:37,274 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-12-07 00:22:37,275 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-12-07 00:22:37,275 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-12-07 00:22:37,275 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-12-07 00:22:37,284 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-12-07 00:22:37,285 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-12-07 00:22:37,285 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-12-07 00:22:37,286 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-12-07 00:22:37,286 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-12-07 00:22:37,287 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-12-07 00:22:37,290 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07 00:22:37,290 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-logging/target/hbase-logging-2.7.0-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@29ca3d04) with optional ClassLoader: null 2024-12-07 00:22:37,291 main DEBUG Shutdown hook enabled. Registering a new one. 2024-12-07 00:22:37,292 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@29ca3d04] started OK. 2024-12-07T00:22:37,312 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-12-07 00:22:37,316 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-12-07 00:22:37,316 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-12-07T00:22:37,798 DEBUG [main {}] hbase.HBaseTestingUtility(348): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6 2024-12-07T00:22:37,799 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestSecureExportSnapshot timeout: 13 mins 2024-12-07T00:22:37,853 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-12-07T00:22:38,158 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1126): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-12-07T00:22:38,161 INFO [Time-limited test {}] hbase.HBaseZKTestingUtility(82): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625, deleteOnExit=true 2024-12-07T00:22:38,162 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1140): STARTING DFS 2024-12-07T00:22:38,163 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/test.cache.data in system properties and HBase conf 2024-12-07T00:22:38,163 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T00:22:38,164 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir in system properties and HBase conf 2024-12-07T00:22:38,165 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T00:22:38,165 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T00:22:38,168 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-07T00:22:38,321 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-12-07T00:22:38,326 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T00:22:38,327 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T00:22:38,328 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T00:22:38,329 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T00:22:38,329 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T00:22:38,330 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T00:22:38,331 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T00:22:38,331 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T00:22:38,332 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T00:22:38,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/nfs.dump.dir in system properties and HBase conf 2024-12-07T00:22:38,333 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir in system properties and HBase conf 2024-12-07T00:22:38,334 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T00:22:38,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T00:22:38,335 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T00:22:39,513 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-12-07T00:22:39,616 INFO [Time-limited test {}] log.Log(170): Logging initialized @3991ms to org.eclipse.jetty.util.log.Slf4jLog 2024-12-07T00:22:39,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:39,846 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:22:39,980 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:22:39,981 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:22:39,983 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T00:22:40,010 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:40,027 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@744df411{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:22:40,028 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70357eda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:22:40,353 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5da2d515{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir/jetty-localhost-38533-hadoop-hdfs-3_4_1-tests_jar-_-any-16995989211235748999/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-12-07T00:22:40,368 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:38533} 2024-12-07T00:22:40,368 INFO [Time-limited test {}] server.Server(415): Started @4744ms 2024-12-07T00:22:40,912 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:40,924 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:22:40,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:22:40,930 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:22:40,930 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T00:22:40,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49de3167{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:22:40,934 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a0dab5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:22:41,063 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3d2c3e29{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir/jetty-localhost-35177-hadoop-hdfs-3_4_1-tests_jar-_-any-11094490139748849433/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:22:41,064 
INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1afb4ca2{HTTP/1.1, (http/1.1)}{localhost:35177} 2024-12-07T00:22:41,065 INFO [Time-limited test {}] server.Server(415): Started @5441ms 2024-12-07T00:22:41,133 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T00:22:41,316 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:41,344 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:22:41,359 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:22:41,359 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:22:41,360 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T00:22:41,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37338c92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:22:41,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2b9deb82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:22:41,570 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1886d2b5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir/jetty-localhost-32849-hadoop-hdfs-3_4_1-tests_jar-_-any-9721731444679008009/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:22:41,571 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@316f1edc{HTTP/1.1, (http/1.1)}{localhost:32849} 2024-12-07T00:22:41,572 INFO [Time-limited test {}] server.Server(415): Started @5948ms 2024-12-07T00:22:41,576 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T00:22:41,737 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:41,748 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:22:41,781 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:22:41,781 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:22:41,781 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T00:22:41,791 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@65fa26fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:22:41,792 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13ce8b71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-12-07T00:22:41,877 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3/current/BP-2074794187-172.17.0.2-1733530959153/current, will proceed with Du for space computation calculation, 2024-12-07T00:22:41,885 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4/current/BP-2074794187-172.17.0.2-1733530959153/current, will proceed with Du for space computation calculation, 2024-12-07T00:22:41,888 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1/current/BP-2074794187-172.17.0.2-1733530959153/current, will proceed with Du for space computation calculation, 2024-12-07T00:22:41,899 WARN [Thread-109 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2/current/BP-2074794187-172.17.0.2-1733530959153/current, will proceed with Du for space computation calculation, 2024-12-07T00:22:41,998 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@666fb670{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir/jetty-localhost-42957-hadoop-hdfs-3_4_1-tests_jar-_-any-1917634516888778817/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-12-07T00:22:42,000 INFO [Time-limited test {}] server.AbstractConnector(333): Started 
ServerConnector@1eafff66{HTTP/1.1, (http/1.1)}{localhost:42957} 2024-12-07T00:22:42,001 INFO [Time-limited test {}] server.Server(415): Started @6377ms 2024-12-07T00:22:42,009 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-12-07T00:22:42,032 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T00:22:42,064 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-12-07T00:22:42,147 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83f252b7387e4847 with lease ID 0x609c7c8b6f66af6c: Processing first storage report for DS-123a0df7-9413-4c73-8e7c-abc949f29669 from datanode DatanodeRegistration(127.0.0.1:45899, datanodeUuid=deaefd16-9ff7-42f6-95fa-e2f923401bf9, infoPort=33269, infoSecurePort=0, ipcPort=36787, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153) 2024-12-07T00:22:42,148 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83f252b7387e4847 with lease ID 0x609c7c8b6f66af6c: from storage DS-123a0df7-9413-4c73-8e7c-abc949f29669 node DatanodeRegistration(127.0.0.1:45899, datanodeUuid=deaefd16-9ff7-42f6-95fa-e2f923401bf9, infoPort=33269, infoSecurePort=0, ipcPort=36787, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-12-07T00:22:42,149 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7a5e576d3fd3f89 with lease ID 0x609c7c8b6f66af6d: Processing first storage report for DS-0926e91f-78b3-42a9-bab0-af07d02c8002 from datanode DatanodeRegistration(127.0.0.1:33501, datanodeUuid=59cc2ab6-abd6-4e0b-8943-d15253b3223d, infoPort=38653, infoSecurePort=0, ipcPort=43265, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153) 2024-12-07T00:22:42,149 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7a5e576d3fd3f89 with lease ID 0x609c7c8b6f66af6d: from storage DS-0926e91f-78b3-42a9-bab0-af07d02c8002 node DatanodeRegistration(127.0.0.1:33501, datanodeUuid=59cc2ab6-abd6-4e0b-8943-d15253b3223d, infoPort=38653, infoSecurePort=0, ipcPort=43265, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:22:42,150 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd7a5e576d3fd3f89 with lease ID 0x609c7c8b6f66af6d: Processing first storage report for DS-d4c3ac72-4f05-48f2-8581-eefe459d1cde from datanode DatanodeRegistration(127.0.0.1:33501, datanodeUuid=59cc2ab6-abd6-4e0b-8943-d15253b3223d, infoPort=38653, infoSecurePort=0, ipcPort=43265, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153) 2024-12-07T00:22:42,151 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd7a5e576d3fd3f89 with lease ID 0x609c7c8b6f66af6d: from storage DS-d4c3ac72-4f05-48f2-8581-eefe459d1cde node DatanodeRegistration(127.0.0.1:33501, datanodeUuid=59cc2ab6-abd6-4e0b-8943-d15253b3223d, infoPort=38653, infoSecurePort=0, ipcPort=43265, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153), 
blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:22:42,151 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x83f252b7387e4847 with lease ID 0x609c7c8b6f66af6c: Processing first storage report for DS-1266e526-cf40-4241-bd32-96cc359a209f from datanode DatanodeRegistration(127.0.0.1:45899, datanodeUuid=deaefd16-9ff7-42f6-95fa-e2f923401bf9, infoPort=33269, infoSecurePort=0, ipcPort=36787, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153) 2024-12-07T00:22:42,151 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x83f252b7387e4847 with lease ID 0x609c7c8b6f66af6c: from storage DS-1266e526-cf40-4241-bd32-96cc359a209f node DatanodeRegistration(127.0.0.1:45899, datanodeUuid=deaefd16-9ff7-42f6-95fa-e2f923401bf9, infoPort=33269, infoSecurePort=0, ipcPort=36787, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-12-07T00:22:42,348 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5/current/BP-2074794187-172.17.0.2-1733530959153/current, will proceed with Du for space computation calculation, 2024-12-07T00:22:42,356 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6/current/BP-2074794187-172.17.0.2-1733530959153/current, will proceed with Du for space computation calculation, 2024-12-07T00:22:42,553 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-12-07T00:22:42,562 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x114a62af4321d472 with lease ID 0x609c7c8b6f66af6e: Processing first storage report for DS-2a736081-df51-4ec6-b4af-8ebc6dcd9938 from datanode DatanodeRegistration(127.0.0.1:33385, datanodeUuid=8b0012f3-4bca-4ac9-98e3-1111e06048df, infoPort=34067, infoSecurePort=0, ipcPort=41739, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153) 2024-12-07T00:22:42,562 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x114a62af4321d472 with lease ID 0x609c7c8b6f66af6e: from storage DS-2a736081-df51-4ec6-b4af-8ebc6dcd9938 node DatanodeRegistration(127.0.0.1:33385, datanodeUuid=8b0012f3-4bca-4ac9-98e3-1111e06048df, infoPort=34067, infoSecurePort=0, ipcPort=41739, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T00:22:42,562 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x114a62af4321d472 with lease ID 0x609c7c8b6f66af6e: Processing first storage report for DS-ba5874ab-e401-4878-853e-b0943e253a91 from datanode DatanodeRegistration(127.0.0.1:33385, datanodeUuid=8b0012f3-4bca-4ac9-98e3-1111e06048df, infoPort=34067, infoSecurePort=0, ipcPort=41739, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153) 2024-12-07T00:22:42,563 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x114a62af4321d472 with lease ID 0x609c7c8b6f66af6e: from storage DS-ba5874ab-e401-4878-853e-b0943e253a91 node DatanodeRegistration(127.0.0.1:33385, datanodeUuid=8b0012f3-4bca-4ac9-98e3-1111e06048df, infoPort=34067, infoSecurePort=0, ipcPort=41739, storageInfo=lv=-57;cid=testClusterID;nsid=1437558974;c=1733530959153), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-12-07T00:22:42,672 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(703): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6 2024-12-07T00:22:42,798 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(259): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/zookeeper_0, clientPort=60354, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-12-07T00:22:42,815 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(284): Started MiniZooKeeperCluster and ran 'stat' on client port=60354 2024-12-07T00:22:42,833 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
2024-12-07T00:22:42,838 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:43,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741825_1001 (size=7) 2024-12-07T00:22:43,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741825_1001 (size=7) 2024-12-07T00:22:43,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741825_1001 (size=7) 2024-12-07T00:22:43,613 INFO [Time-limited test {}] util.FSUtils(490): Created version file at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 with version=8 2024-12-07T00:22:43,613 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1460): Setting hbase.fs.tmp.dir to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/hbase-staging 2024-12-07T00:22:43,772 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-12-07T00:22:44,132 INFO [Time-limited test {}] client.ConnectionUtils(129): master/6e6711bf2d40:0 server-side Connection retries=45 2024-12-07T00:22:44,155 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,156 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,156 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:22:44,156 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,156 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:22:44,304 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:22:44,382 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-12-07T00:22:44,397 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-12-07T00:22:44,402 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:22:44,438 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 14851 (auto-detected) 2024-12-07T00:22:44,440 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 
02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-12-07T00:22:44,461 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:35025 2024-12-07T00:22:44,472 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:44,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:44,492 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=master:35025 connecting to ZooKeeper ensemble=127.0.0.1:60354 2024-12-07T00:22:44,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:350250x0, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:22:44,534 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35025-0x10069db04320000 connected 2024-12-07T00:22:44,573 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:22:44,576 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:22:44,598 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:22:44,604 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35025 2024-12-07T00:22:44,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35025 2024-12-07T00:22:44,608 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35025 2024-12-07T00:22:44,612 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35025 2024-12-07T00:22:44,613 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35025 2024-12-07T00:22:44,623 INFO [Time-limited test {}] master.HMaster(488): hbase.rootdir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45, hbase.cluster.distributed=false 2024-12-07T00:22:44,707 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6e6711bf2d40:0 server-side Connection retries=45 2024-12-07T00:22:44,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,707 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,707 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 
2024-12-07T00:22:44,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,708 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:22:44,711 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:22:44,714 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:22:44,720 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:36059 2024-12-07T00:22:44,722 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:22:44,730 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:22:44,731 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:44,733 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:44,736 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:36059 connecting to ZooKeeper ensemble=127.0.0.1:60354 2024-12-07T00:22:44,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:360590x0, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:22:44,742 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360590x0, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:22:44,743 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360590x0, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:22:44,744 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:360590x0, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:22:44,745 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36059-0x10069db04320001 connected 2024-12-07T00:22:44,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36059 2024-12-07T00:22:44,748 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36059 2024-12-07T00:22:44,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36059 2024-12-07T00:22:44,749 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36059 2024-12-07T00:22:44,750 DEBUG [Time-limited test 
{}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36059 2024-12-07T00:22:44,768 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6e6711bf2d40:0 server-side Connection retries=45 2024-12-07T00:22:44,768 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,769 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:22:44,769 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,771 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:22:44,771 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:22:44,771 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:22:44,773 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:33095 2024-12-07T00:22:44,774 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:22:44,777 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:22:44,778 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:44,782 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:44,787 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:33095 connecting to ZooKeeper ensemble=127.0.0.1:60354 2024-12-07T00:22:44,797 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:330950x0, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:22:44,800 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:330950x0, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:22:44,803 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33095-0x10069db04320002 connected 2024-12-07T00:22:44,804 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 
2024-12-07T00:22:44,805 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:22:44,811 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33095 2024-12-07T00:22:44,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33095 2024-12-07T00:22:44,812 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33095 2024-12-07T00:22:44,824 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33095 2024-12-07T00:22:44,824 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33095 2024-12-07T00:22:44,847 INFO [Time-limited test {}] client.ConnectionUtils(129): regionserver/6e6711bf2d40:0 server-side Connection retries=45 2024-12-07T00:22:44,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,847 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,848 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-12-07T00:22:44,848 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-12-07T00:22:44,848 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-12-07T00:22:44,848 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-12-07T00:22:44,849 INFO [Time-limited test {}] ipc.NettyRpcServer(315): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-12-07T00:22:44,858 INFO [Time-limited test {}] ipc.NettyRpcServer(197): Bind to /172.17.0.2:43281 2024-12-07T00:22:44,859 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-12-07T00:22:44,865 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-12-07T00:22:44,867 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:44,870 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:44,874 INFO 
[Time-limited test {}] zookeeper.RecoverableZooKeeper(138): Process identifier=regionserver:43281 connecting to ZooKeeper ensemble=127.0.0.1:60354 2024-12-07T00:22:44,879 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432810x0, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:22:44,880 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:432810x0, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-12-07T00:22:44,883 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:432810x0, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:22:44,884 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43281-0x10069db04320003 connected 2024-12-07T00:22:44,885 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-12-07T00:22:44,891 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43281 2024-12-07T00:22:44,892 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43281 2024-12-07T00:22:44,894 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43281 2024-12-07T00:22:44,900 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43281 2024-12-07T00:22:44,904 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43281 2024-12-07T00:22:44,908 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(2445): Adding backup master ZNode /hbase/backup-masters/6e6711bf2d40,35025,1733530963762 2024-12-07T00:22:44,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:22:44,916 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:22:44,917 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:22:44,918 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:22:44,919 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/6e6711bf2d40,35025,1733530963762 2024-12-07T00:22:44,931 DEBUG [M:0;6e6711bf2d40:35025 {}] regionserver.ShutdownHook(81): 
Installed shutdown hook thread: Shutdownhook:M:0;6e6711bf2d40:35025 2024-12-07T00:22:44,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:22:44,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:22:44,945 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:22:44,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:44,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:44,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:44,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-12-07T00:22:44,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:44,948 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T00:22:44,949 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(111): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-12-07T00:22:44,949 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/6e6711bf2d40,35025,1733530963762 from backup master directory 2024-12-07T00:22:44,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:22:44,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:22:44,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/6e6711bf2d40,35025,1733530963762 
2024-12-07T00:22:44,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:22:44,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-12-07T00:22:44,957 WARN [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T00:22:44,957 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=6e6711bf2d40,35025,1733530963762 2024-12-07T00:22:44,959 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-12-07T00:22:44,961 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-12-07T00:22:45,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741826_1002 (size=42) 2024-12-07T00:22:45,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741826_1002 (size=42) 2024-12-07T00:22:45,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741826_1002 (size=42) 2024-12-07T00:22:45,067 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] util.FSUtils(639): Created cluster ID file at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/hbase.id with ID: 12c1c9d9-0e76-4c1a-8497-48da635405c7 2024-12-07T00:22:45,149 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-12-07T00:22:45,191 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,192 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,209 WARN [IPC Server handler 1 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 
(unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:45,209 WARN [IPC Server handler 1 on default port 46157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:22:45,210 WARN [IPC Server handler 1 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:22:45,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741827_1003 (size=196) 2024-12-07T00:22:45,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741827_1003 (size=196) 2024-12-07T00:22:45,260 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] region.MasterRegion(372): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:22:45,263 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-12-07T00:22:45,289 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo) at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at java.lang.Class.forName0(Native Method) ~[?:?] at java.lang.Class.forName(Class.java:375) ~[?:?] at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:147) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:160) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.getProvider(WALFactory.java:200) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:232) ~[classes/:?] at org.apache.hadoop.hbase.wal.WALFactory.(WALFactory.java:207) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:402) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:973) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2470) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:590) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.master.HMaster.lambda$run$1(HMaster.java:587) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-12-07T00:22:45,295 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:22:45,330 WARN [IPC Server handler 2 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:45,331 WARN [IPC Server handler 2 on default port 46157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:22:45,331 WARN [IPC Server handler 2 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:22:45,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741828_1004 (size=1189) 2024-12-07T00:22:45,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741828_1004 (size=1189) 2024-12-07T00:22:45,368 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(7124): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 
'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/data/master/store 2024-12-07T00:22:45,382 WARN [IPC Server handler 3 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:45,382 WARN [IPC Server handler 3 on default port 46157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:22:45,383 WARN [IPC Server handler 3 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:22:45,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741829_1005 (size=34) 2024-12-07T00:22:45,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741829_1005 (size=34) 2024-12-07T00:22:45,401 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-12-07T00:22:45,401 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:45,403 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T00:22:45,403 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:22:45,403 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:22:45,403 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T00:22:45,404 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T00:22:45,404 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1922): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:22:45,404 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-07T00:22:45,407 WARN [master/6e6711bf2d40:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/data/master/store/.initializing 2024-12-07T00:22:45,407 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762 2024-12-07T00:22:45,417 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T00:22:45,432 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6e6711bf2d40%2C35025%2C1733530963762, suffix=, logDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762, archiveDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/oldWALs, maxLogs=10 2024-12-07T00:22:45,461 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762/6e6711bf2d40%2C35025%2C1733530963762.1733530965439, exclude list is [], retry=0 2024-12-07T00:22:45,483 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45899,DS-123a0df7-9413-4c73-8e7c-abc949f29669,DISK] 2024-12-07T00:22:45,483 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33385,DS-2a736081-df51-4ec6-b4af-8ebc6dcd9938,DISK] 2024-12-07T00:22:45,483 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33501,DS-0926e91f-78b3-42a9-bab0-af07d02c8002,DISK] 2024-12-07T00:22:45,486 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 
2024-12-07T00:22:45,539 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762/6e6711bf2d40%2C35025%2C1733530963762.1733530965439 2024-12-07T00:22:45,540 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33269:33269),(127.0.0.1/127.0.0.1:38653:38653),(127.0.0.1/127.0.0.1:34067:34067)] 2024-12-07T00:22:45,541 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(7285): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:22:45,541 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(894): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:45,548 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(7327): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,549 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(7330): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,596 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,626 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-12-07T00:22:45,630 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:45,633 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:22:45,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-12-07T00:22:45,639 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:45,641 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:22:45,641 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,644 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-12-07T00:22:45,644 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:45,645 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:22:45,646 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,649 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-12-07T00:22:45,650 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:45,651 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:22:45,655 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,656 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,667 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-12-07T00:22:45,671 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1085): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-12-07T00:22:45,677 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:22:45,678 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1102): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71020262, jitterRate=0.058284372091293335}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-12-07T00:22:45,683 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] regionserver.HRegion(1001): Region open journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-07T00:22:45,684 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-12-07T00:22:45,716 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@520d8fd9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:45,755 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(882): No meta location available on zookeeper, skip migrating... 2024-12-07T00:22:45,766 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-12-07T00:22:45,767 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(633): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-12-07T00:22:45,769 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-12-07T00:22:45,771 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(653): Recovered RegionProcedureStore lease in 1 msec 2024-12-07T00:22:45,778 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(667): Loaded RegionProcedureStore in 7 msec 2024-12-07T00:22:45,778 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-12-07T00:22:45,815 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-12-07T00:22:45,833 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-12-07T00:22:45,836 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/balancer already deleted, retry=false 2024-12-07T00:22:45,839 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-12-07T00:22:45,841 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-12-07T00:22:45,843 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/normalizer already deleted, retry=false 2024-12-07T00:22:45,846 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-12-07T00:22:45,850 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-12-07T00:22:45,852 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/split already deleted, retry=false 2024-12-07T00:22:45,854 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-12-07T00:22:45,855 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/switch/merge already deleted, retry=false 2024-12-07T00:22:45,870 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-12-07T00:22:45,871 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-12-07T00:22:45,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:22:45,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:22:45,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:22:45,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-12-07T00:22:45,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,876 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-12-07T00:22:45,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,877 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,879 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(826): Active/primary master=6e6711bf2d40,35025,1733530963762, sessionid=0x10069db04320000, setting cluster-up flag (Was=false) 2024-12-07T00:22:45,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,899 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-12-07T00:22:45,901 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6e6711bf2d40,35025,1733530963762 2024-12-07T00:22:45,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:45,912 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-12-07T00:22:45,914 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=6e6711bf2d40,35025,1733530963762 2024-12-07T00:22:46,033 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;6e6711bf2d40:36059 2024-12-07T00:22:46,035 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;6e6711bf2d40:33095 2024-12-07T00:22:46,037 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1008): ClusterId : 12c1c9d9-0e76-4c1a-8497-48da635405c7 2024-12-07T00:22:46,037 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1008): ClusterId : 12c1c9d9-0e76-4c1a-8497-48da635405c7 2024-12-07T00:22:46,041 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:22:46,041 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:22:46,041 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;6e6711bf2d40:43281 2024-12-07T00:22:46,043 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1008): ClusterId : 12c1c9d9-0e76-4c1a-8497-48da635405c7 2024-12-07T00:22:46,043 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-12-07T00:22:46,045 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(3390): Registered master coprocessor service: service=AccessControlService 2024-12-07T00:22:46,048 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:22:46,048 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:22:46,048 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:22:46,049 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-12-07T00:22:46,049 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:22:46,048 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-12-07T00:22:46,050 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:22:46,051 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-12-07T00:22:46,053 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:22:46,054 DEBUG [RS:1;6e6711bf2d40:33095 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@71473227, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:46,080 DEBUG [RS:1;6e6711bf2d40:33095 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69110e0f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6e6711bf2d40/172.17.0.2:0 2024-12-07T00:22:46,081 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:22:46,082 DEBUG [RS:2;6e6711bf2d40:43281 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bf1048b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:46,088 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-07T00:22:46,088 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-07T00:22:46,089 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-07T00:22:46,089 INFO [RS:1;6e6711bf2d40:33095 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:22:46,089 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1090): About to register with Master. 2024-12-07T00:22:46,093 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(3073): reportForDuty to master=6e6711bf2d40,35025,1733530963762 with isa=6e6711bf2d40/172.17.0.2:33095, startcode=1733530964767 2024-12-07T00:22:46,096 DEBUG [RS:2;6e6711bf2d40:43281 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f763b6b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6e6711bf2d40/172.17.0.2:0 2024-12-07T00:22:46,096 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-07T00:22:46,096 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-07T00:22:46,097 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-07T00:22:46,097 INFO [RS:2;6e6711bf2d40:43281 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:22:46,097 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-07T00:22:46,098 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-12-07T00:22:46,099 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(3073): reportForDuty to master=6e6711bf2d40,35025,1733530963762 with isa=6e6711bf2d40/172.17.0.2:43281, startcode=1733530964846 2024-12-07T00:22:46,099 DEBUG [RS:0;6e6711bf2d40:36059 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@38e5e8e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:46,112 DEBUG [RS:0;6e6711bf2d40:36059 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@547685d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6e6711bf2d40/172.17.0.2:0 2024-12-07T00:22:46,113 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.RegionServerCoprocessorHost(67): System coprocessor loading is enabled 2024-12-07T00:22:46,113 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.RegionServerCoprocessorHost(68): Table coprocessor loading is enabled 2024-12-07T00:22:46,113 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(900): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-12-07T00:22:46,114 INFO [RS:0;6e6711bf2d40:36059 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:22:46,114 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1090): About to register with Master. 
2024-12-07T00:22:46,116 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(3073): reportForDuty to master=6e6711bf2d40,35025,1733530963762 with isa=6e6711bf2d40/172.17.0.2:36059, startcode=1733530964705 2024-12-07T00:22:46,119 DEBUG [RS:0;6e6711bf2d40:36059 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:22:46,119 DEBUG [RS:1;6e6711bf2d40:33095 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:22:46,121 DEBUG [RS:2;6e6711bf2d40:43281 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:22:46,175 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59315, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:22:46,176 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT; InitMetaProcedure table=hbase:meta 2024-12-07T00:22:46,177 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38259, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:22:46,178 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46719, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:22:46,183 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T00:22:46,187 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(575): slop=0.2 2024-12-07T00:22:46,191 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(294): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-12-07T00:22:46,192 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T00:22:46,193 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3280) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:593) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:443) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:105) ~[classes/:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) ~[classes/:2.7.0-SNAPSHOT] 2024-12-07T00:22:46,203 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] balancer.RegionLocationFinder(146): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 6e6711bf2d40,35025,1733530963762 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-12-07T00:22:46,207 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/6e6711bf2d40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:22:46,207 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/6e6711bf2d40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:22:46,207 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/6e6711bf2d40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:22:46,207 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/6e6711bf2d40:0, corePoolSize=5, maxPoolSize=5 2024-12-07T00:22:46,207 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/6e6711bf2d40:0, 
corePoolSize=10, maxPoolSize=10 2024-12-07T00:22:46,208 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,208 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/6e6711bf2d40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:22:46,208 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,219 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, locked=true; InitMetaProcedure table=hbase:meta 2024-12-07T00:22:46,220 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(75): BOOTSTRAP: creating hbase:meta region 2024-12-07T00:22:46,224 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-07T00:22:46,224 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-07T00:22:46,224 WARN [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-07T00:22:46,224 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(3097): Master is not running yet 2024-12-07T00:22:46,224 WARN [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 2024-12-07T00:22:46,224 WARN [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1099): reportForDuty failed; sleeping 100 ms and then retrying. 
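The "Master is not running yet" / "reportForDuty failed; sleeping 100 ms and then retrying" lines are the normal startup race: each region server keeps re-announcing itself until the master finishes initialization. A minimal, hypothetical sketch of that retry pattern (not HBase's actual implementation):

public final class ReportForDutyRetrySketch {
    /** Stand-in for the master endpoint; hypothetical interface for illustration. */
    interface Master {
        void reportForDuty(String serverName) throws Exception;
    }

    static void register(Master master, String serverName) throws InterruptedException {
        long sleepMs = 100; // the log shows a fixed 100 ms pause between attempts
        while (true) {
            try {
                master.reportForDuty(serverName);
                return; // registered successfully
            } catch (InterruptedException ie) {
                throw ie; // propagate shutdown requests
            } catch (Exception notRunningYet) {
                Thread.sleep(sleepMs); // master not ready; wait and retry
            }
        }
    }
}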
2024-12-07T00:22:46,226 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:46,227 INFO [PEWorker-1 {}] util.FSTableDescriptors(133): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T00:22:46,233 WARN [IPC Server handler 4 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:46,233 WARN [IPC Server handler 4 on default port 46157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:22:46,233 WARN [IPC Server handler 4 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:22:46,240 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1733530996240 2024-12-07T00:22:46,243 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-12-07T00:22:46,244 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-12-07T00:22:46,249 INFO 
[master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-12-07T00:22:46,249 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-12-07T00:22:46,250 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-12-07T00:22:46,250 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-12-07T00:22:46,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741831_1007 (size=1039) 2024-12-07T00:22:46,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741831_1007 (size=1039) 2024-12-07T00:22:46,256 INFO [PEWorker-1 {}] util.FSTableDescriptors(140): Updated hbase:meta table descriptor to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1039 2024-12-07T00:22:46,256 INFO [PEWorker-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:46,260 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
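The descriptor above enumerates the hbase:meta column families (info, rep_barrier, table) with attributes such as VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, and BLOCKSIZE. For orientation, a comparable family for an ordinary user table could be declared through the HBase 2.x client API roughly as below (assumes hbase-client on the classpath; the table name is hypothetical, and this is not how the meta descriptor itself is built):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static TableDescriptor build() {
        // Mirrors the attributes printed for the 'info' family above, as an example.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setBlocksize(8192)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .build();
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(info)
            .build();
    }
}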
2024-12-07T00:22:46,269 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-12-07T00:22:46,271 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-12-07T00:22:46,272 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-12-07T00:22:46,280 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-12-07T00:22:46,281 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-12-07T00:22:46,298 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/6e6711bf2d40:0:becomeActiveMaster-HFileCleaner.large.0-1733530966283,5,FailOnTimeoutGroup] 2024-12-07T00:22:46,320 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/6e6711bf2d40:0:becomeActiveMaster-HFileCleaner.small.0-1733530966298,5,FailOnTimeoutGroup] 2024-12-07T00:22:46,320 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,320 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(1680): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-12-07T00:22:46,322 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,322 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
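The hfile_cleaner pool above is assembled from several delegate cleaners (TTL-based, snapshot-aware, HFile-link-aware); conceptually, a file is only removed when no delegate vetoes the deletion. A hypothetical sketch of that chain-of-delegates check (not the CleanerChore implementation):

import java.util.List;
import java.util.function.Predicate;

public class CleanerChainSketch {
    /** A file may be deleted only if every configured cleaner considers it deletable. */
    static boolean isDeletable(String file, List<Predicate<String>> cleaners) {
        for (Predicate<String> cleaner : cleaners) {
            if (!cleaner.test(file)) {
                return false; // a single veto (e.g. file still referenced by a snapshot) keeps it
            }
        }
        return true;
    }
}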
2024-12-07T00:22:46,326 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(3073): reportForDuty to master=6e6711bf2d40,35025,1733530963762 with isa=6e6711bf2d40/172.17.0.2:36059, startcode=1733530964705 2024-12-07T00:22:46,326 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(3073): reportForDuty to master=6e6711bf2d40,35025,1733530963762 with isa=6e6711bf2d40/172.17.0.2:33095, startcode=1733530964767 2024-12-07T00:22:46,328 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:46,332 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] master.ServerManager(486): Registering regionserver=6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:46,335 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(3073): reportForDuty to master=6e6711bf2d40,35025,1733530963762 with isa=6e6711bf2d40/172.17.0.2:43281, startcode=1733530964846 2024-12-07T00:22:46,349 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:46,349 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:46,350 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46157 2024-12-07T00:22:46,350 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-07T00:22:46,350 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] master.ServerManager(486): Registering regionserver=6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:46,355 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T00:22:46,356 DEBUG [RS:0;6e6711bf2d40:36059 {}] zookeeper.ZKUtil(111): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:46,356 WARN [RS:0;6e6711bf2d40:36059 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-12-07T00:22:46,357 INFO [RS:0;6e6711bf2d40:36059 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:22:46,357 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:46,358 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:46,358 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46157 2024-12-07T00:22:46,358 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] master.ServerManager(332): Checking decommissioned status of RegionServer 6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:46,358 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-07T00:22:46,358 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] master.ServerManager(486): Registering regionserver=6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:46,363 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1725): Config from master: hbase.rootdir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:46,363 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1725): Config from master: fs.defaultFS=hdfs://localhost:46157 2024-12-07T00:22:46,363 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1725): Config from master: hbase.master.info.port=-1 2024-12-07T00:22:46,367 DEBUG [RS:1;6e6711bf2d40:33095 {}] zookeeper.ZKUtil(111): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:46,367 WARN [RS:1;6e6711bf2d40:33095 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-12-07T00:22:46,367 INFO [RS:1;6e6711bf2d40:33095 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:22:46,367 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:46,367 DEBUG [RS:2;6e6711bf2d40:43281 {}] zookeeper.ZKUtil(111): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:46,368 WARN [RS:2;6e6711bf2d40:43281 {}] hbase.ZNodeClearer(69): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
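Each region server above has a watcher set on its znode under /hbase/rs, and the master later reacts when those ephemeral nodes appear (the "RegionServer ephemeral node created" lines further down). A minimal, hypothetical sketch of ephemeral registration using the plain ZooKeeper client (session timeout and error handling are assumptions):

import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
    public static void register(String quorum, String serverName)
            throws java.io.IOException, KeeperException, InterruptedException {
        ZooKeeper zk = new ZooKeeper(quorum, 30_000, event -> { /* watcher callback */ });
        // The ephemeral node vanishes automatically when the session dies,
        // which is how the tracker notices a crashed region server.
        zk.create("/hbase/rs/" + serverName, new byte[0],
            ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
    }
}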
2024-12-07T00:22:46,368 INFO [RS:2;6e6711bf2d40:43281 {}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:22:46,368 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(2100): logDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:46,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741832_1008 (size=32) 2024-12-07T00:22:46,375 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6e6711bf2d40,36059,1733530964705] 2024-12-07T00:22:46,375 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6e6711bf2d40,33095,1733530964767] 2024-12-07T00:22:46,375 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [6e6711bf2d40,43281,1733530964846] 2024-12-07T00:22:46,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741832_1008 (size=32) 2024-12-07T00:22:46,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741832_1008 (size=32) 2024-12-07T00:22:46,380 DEBUG [PEWorker-1 {}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:46,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T00:22:46,428 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-07T00:22:46,440 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T00:22:46,441 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:46,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:22:46,443 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, 
cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T00:22:46,448 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:22:46,453 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T00:22:46,453 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:46,453 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-07T00:22:46,454 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:22:46,455 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T00:22:46,457 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.Replication(140): Replication stats-in-log period=300 seconds 2024-12-07T00:22:46,459 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T00:22:46,459 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:46,461 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:22:46,464 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740 2024-12-07T00:22:46,465 DEBUG [PEWorker-1 {}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740 2024-12-07T00:22:46,466 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:22:46,469 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 2024-12-07T00:22:46,474 DEBUG [PEWorker-1 {}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-07T00:22:46,474 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-12-07T00:22:46,478 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:22:46,480 INFO [PEWorker-1 {}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71014642, jitterRate=0.05820062756538391}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T00:22:46,484 DEBUG [PEWorker-1 {}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-07T00:22:46,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-07T00:22:46,485 INFO [PEWorker-1 {}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-07T00:22:46,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-07T00:22:46,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T00:22:46,485 DEBUG [PEWorker-1 {}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T00:22:46,494 INFO [PEWorker-1 {}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-07T00:22:46,494 DEBUG [PEWorker-1 {}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-07T00:22:46,495 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:22:46,499 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(96): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, locked=true; InitMetaProcedure table=hbase:meta 2024-12-07T00:22:46,499 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(107): Going to assign meta 2024-12-07T00:22:46,500 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:22:46,503 INFO [RS:1;6e6711bf2d40:33095 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:22:46,503 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(168): Chore 
ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,508 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.MemStoreFlusher(130): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-12-07T00:22:46,512 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-12-07T00:22:46,516 INFO [RS:2;6e6711bf2d40:43281 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:22:46,516 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,516 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-07T00:22:46,516 INFO [RS:0;6e6711bf2d40:36059 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-12-07T00:22:46,516 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,520 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-07T00:22:46,526 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
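The MemStoreFlusher lines above report globalMemStoreLimit=880 M and globalMemStoreLimitLowMark=836 M; the low mark is 95% of the limit, consistent with a lower-limit ratio of 0.95 (often cited as the default for hbase.regionserver.global.memstore.size.lower.limit, though the exact property name should be verified for the version in use). A trivial arithmetic check:

public class MemStoreLimitSketch {
    public static void main(String[] args) {
        double globalLimitMb = 880.0;  // from the log line above
        double lowerMarkRatio = 0.95;  // assumed lower-limit ratio
        System.out.printf("%.0f%n", globalLimitMb * lowerMarkRatio); // prints 836, matching the log
    }
}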
2024-12-07T00:22:46,526 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer$CompactionChecker(1988): CompactionChecker runs every PT1S 2024-12-07T00:22:46,526 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,526 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,526 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,527 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,527 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,527 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6e6711bf2d40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:22:46,527 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,527 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,528 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-12-07T00:22:46,528 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-12-07T00:22:46,528 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,528 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,528 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,528 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,529 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,529 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6e6711bf2d40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:22:46,529 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,529 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,529 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,529 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,530 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,530 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:22:46,530 DEBUG [RS:1;6e6711bf2d40:33095 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6e6711bf2d40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:22:46,530 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,530 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,531 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,531 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor 
service name=RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:22:46,531 DEBUG [RS:2;6e6711bf2d40:43281 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6e6711bf2d40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:22:46,531 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,531 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-12-07T00:22:46,531 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,531 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,532 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,532 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,532 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,532 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/6e6711bf2d40:0, corePoolSize=2, maxPoolSize=2 2024-12-07T00:22:46,532 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,532 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,532 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,533 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,533 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/6e6711bf2d40:0, corePoolSize=1, maxPoolSize=1 2024-12-07T00:22:46,533 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:22:46,533 DEBUG [RS:0;6e6711bf2d40:36059 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/6e6711bf2d40:0, corePoolSize=3, maxPoolSize=3 2024-12-07T00:22:46,563 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is 
enabled. 2024-12-07T00:22:46,563 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,563 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,564 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,564 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,33095,1733530964767-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:22:46,568 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,568 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,568 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,568 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,568 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,43281,1733530964846-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:22:46,569 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,569 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,570 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,570 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,570 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,36059,1733530964705-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:22:46,614 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:22:46,617 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,36059,1733530964705-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,620 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:22:46,621 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,33095,1733530964767-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:46,622 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-12-07T00:22:46,622 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,43281,1733530964846-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 
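The ChoreService entries above all follow one pattern: a named task scheduled at a fixed period (CompactionChecker and MemstoreFlusherChore every second, nonceCleaner every 360 s, the heap memory tuner every 60 s, and so on). A generic sketch of that pattern with a plain ScheduledExecutorService (illustrative only, not HBase's ChoreService):

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
    public static void main(String[] args) {
        ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
        // Equivalent in spirit to "CompactionChecker, period=1000, unit=MILLISECONDS is enabled".
        chores.scheduleAtFixedRate(
            () -> System.out.println("compaction check"), 1000, 1000, TimeUnit.MILLISECONDS);
        // The pool thread is non-daemon, so this sketch keeps running until shut down.
    }
}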
2024-12-07T00:22:46,649 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.Replication(204): 6e6711bf2d40,43281,1733530964846 started 2024-12-07T00:22:46,649 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1767): Serving as 6e6711bf2d40,43281,1733530964846, RpcServer on 6e6711bf2d40/172.17.0.2:43281, sessionid=0x10069db04320003 2024-12-07T00:22:46,650 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.Replication(204): 6e6711bf2d40,36059,1733530964705 started 2024-12-07T00:22:46,650 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:22:46,650 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1767): Serving as 6e6711bf2d40,36059,1733530964705, RpcServer on 6e6711bf2d40/172.17.0.2:36059, sessionid=0x10069db04320001 2024-12-07T00:22:46,650 DEBUG [RS:2;6e6711bf2d40:43281 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:46,650 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6e6711bf2d40,43281,1733530964846' 2024-12-07T00:22:46,650 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:22:46,650 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:22:46,650 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.Replication(204): 6e6711bf2d40,33095,1733530964767 started 2024-12-07T00:22:46,650 DEBUG [RS:0;6e6711bf2d40:36059 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:46,650 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1767): Serving as 6e6711bf2d40,33095,1733530964767, RpcServer on 6e6711bf2d40/172.17.0.2:33095, sessionid=0x10069db04320002 2024-12-07T00:22:46,650 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6e6711bf2d40,36059,1733530964705' 2024-12-07T00:22:46,650 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:22:46,650 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-12-07T00:22:46,651 DEBUG [RS:1;6e6711bf2d40:33095 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:46,651 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6e6711bf2d40,33095,1733530964767' 2024-12-07T00:22:46,651 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-12-07T00:22:46,653 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:22:46,653 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:22:46,653 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-12-07T00:22:46,654 DEBUG 
[RS:2;6e6711bf2d40:43281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:22:46,654 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:22:46,654 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-12-07T00:22:46,654 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:22:46,654 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:22:46,654 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-12-07T00:22:46,654 DEBUG [RS:2;6e6711bf2d40:43281 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:46,654 DEBUG [RS:0;6e6711bf2d40:36059 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:46,654 DEBUG [RS:1;6e6711bf2d40:33095 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:46,654 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6e6711bf2d40,43281,1733530964846' 2024-12-07T00:22:46,654 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6e6711bf2d40,36059,1733530964705' 2024-12-07T00:22:46,654 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '6e6711bf2d40,33095,1733530964767' 2024-12-07T00:22:46,654 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:22:46,654 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:22:46,654 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-12-07T00:22:46,655 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:22:46,655 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:22:46,655 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-12-07T00:22:46,656 DEBUG [RS:0;6e6711bf2d40:36059 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:22:46,656 DEBUG [RS:1;6e6711bf2d40:33095 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:22:46,656 INFO [RS:1;6e6711bf2d40:33095 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:22:46,656 INFO [RS:0;6e6711bf2d40:36059 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:22:46,656 DEBUG [RS:2;6e6711bf2d40:43281 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-12-07T00:22:46,656 INFO [RS:1;6e6711bf2d40:33095 {}] quotas.RegionServerSpaceQuotaManager(80): 
Quota support disabled, not starting space quota manager. 2024-12-07T00:22:46,656 INFO [RS:0;6e6711bf2d40:36059 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T00:22:46,656 INFO [RS:2;6e6711bf2d40:43281 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-12-07T00:22:46,656 INFO [RS:2;6e6711bf2d40:43281 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-12-07T00:22:46,682 WARN [6e6711bf2d40:35025 {}] assignment.AssignmentManager(2423): No servers available; cannot place 1 unassigned regions. 2024-12-07T00:22:46,742 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:46,742 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:46,743 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:22:46,743 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:22:46,763 INFO [RS:0;6e6711bf2d40:36059 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T00:22:46,763 INFO [RS:2;6e6711bf2d40:43281 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T00:22:46,764 INFO [RS:1;6e6711bf2d40:33095 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-12-07T00:22:46,768 INFO [RS:0;6e6711bf2d40:36059 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6e6711bf2d40%2C36059%2C1733530964705, suffix=, logDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,36059,1733530964705, archiveDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/oldWALs, maxLogs=32 2024-12-07T00:22:46,768 INFO [RS:1;6e6711bf2d40:33095 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6e6711bf2d40%2C33095%2C1733530964767, suffix=, 
logDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,33095,1733530964767, archiveDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/oldWALs, maxLogs=32 2024-12-07T00:22:46,772 INFO [RS:2;6e6711bf2d40:43281 {}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6e6711bf2d40%2C43281%2C1733530964846, suffix=, logDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,43281,1733530964846, archiveDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/oldWALs, maxLogs=32 2024-12-07T00:22:46,798 DEBUG [RS:2;6e6711bf2d40:43281 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,43281,1733530964846/6e6711bf2d40%2C43281%2C1733530964846.1733530966776, exclude list is [], retry=0 2024-12-07T00:22:46,801 WARN [IPC Server handler 3 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:46,802 DEBUG [RS:1;6e6711bf2d40:33095 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,33095,1733530964767/6e6711bf2d40%2C33095%2C1733530964767.1733530966775, exclude list is [], retry=0 2024-12-07T00:22:46,802 WARN [IPC Server handler 3 on default port 46157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:22:46,802 WARN [IPC Server handler 3 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:22:46,805 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45899,DS-123a0df7-9413-4c73-8e7c-abc949f29669,DISK] 2024-12-07T00:22:46,805 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33501,DS-0926e91f-78b3-42a9-bab0-af07d02c8002,DISK] 2024-12-07T00:22:46,808 DEBUG [RS:0;6e6711bf2d40:36059 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for 
/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,36059,1733530964705/6e6711bf2d40%2C36059%2C1733530964705.1733530966775, exclude list is [], retry=0 2024-12-07T00:22:46,811 WARN [IPC Server handler 2 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:46,811 WARN [IPC Server handler 2 on default port 46157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:22:46,811 WARN [IPC Server handler 2 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:22:46,814 DEBUG [RS-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33385,DS-2a736081-df51-4ec6-b4af-8ebc6dcd9938,DISK] 2024-12-07T00:22:46,815 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33501,DS-0926e91f-78b3-42a9-bab0-af07d02c8002,DISK] 2024-12-07T00:22:46,815 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45899,DS-123a0df7-9413-4c73-8e7c-abc949f29669,DISK] 2024-12-07T00:22:46,816 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45899,DS-123a0df7-9413-4c73-8e7c-abc949f29669,DISK] 2024-12-07T00:22:46,816 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33501,DS-0926e91f-78b3-42a9-bab0-af07d02c8002,DISK] 2024-12-07T00:22:46,823 INFO [RS:2;6e6711bf2d40:43281 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,43281,1733530964846/6e6711bf2d40%2C43281%2C1733530964846.1733530966776 2024-12-07T00:22:46,830 INFO [RS:0;6e6711bf2d40:36059 {}] wal.AbstractFSWAL(841): New WAL 
/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,36059,1733530964705/6e6711bf2d40%2C36059%2C1733530964705.1733530966775 2024-12-07T00:22:46,832 DEBUG [RS:2;6e6711bf2d40:43281 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:33269:33269),(127.0.0.1/127.0.0.1:38653:38653)] 2024-12-07T00:22:46,836 DEBUG [RS:0;6e6711bf2d40:36059 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38653:38653),(127.0.0.1/127.0.0.1:33269:33269)] 2024-12-07T00:22:46,840 INFO [RS:1;6e6711bf2d40:33095 {}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,33095,1733530964767/6e6711bf2d40%2C33095%2C1733530964767.1733530966775 2024-12-07T00:22:46,840 DEBUG [RS:1;6e6711bf2d40:33095 {}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:34067:34067),(127.0.0.1/127.0.0.1:33269:33269),(127.0.0.1/127.0.0.1:38653:38653)] 2024-12-07T00:22:46,934 DEBUG [6e6711bf2d40:35025 {}] assignment.AssignmentManager(2444): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-12-07T00:22:46,937 DEBUG [6e6711bf2d40:35025 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:22:46,946 DEBUG [6e6711bf2d40:35025 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:22:46,946 DEBUG [6e6711bf2d40:35025 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:22:46,946 DEBUG [6e6711bf2d40:35025 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:22:46,946 INFO [6e6711bf2d40:35025 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:22:46,946 INFO [6e6711bf2d40:35025 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:22:46,946 INFO [6e6711bf2d40:35025 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:22:46,946 DEBUG [6e6711bf2d40:35025 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:22:46,951 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:46,958 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6e6711bf2d40,43281,1733530964846, state=OPENING 2024-12-07T00:22:46,964 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-12-07T00:22:46,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:46,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:46,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:46,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:46,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:22:46,967 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:22:46,968 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:22:46,968 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:22:46,970 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE; OpenRegionProcedure 1588230740, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:22:47,147 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:47,149 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:22:47,152 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34766, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:22:47,167 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(135): Open hbase:meta,,1.1588230740 2024-12-07T00:22:47,168 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(183): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-12-07T00:22:47,168 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-12-07T00:22:47,172 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(500): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=6e6711bf2d40%2C43281%2C1733530964846.meta, suffix=.meta, logDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,43281,1733530964846, archiveDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/oldWALs, maxLogs=32 2024-12-07T00:22:47,190 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(617): When create output stream for /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,43281,1733530964846/6e6711bf2d40%2C43281%2C1733530964846.meta.1733530967173.meta, exclude list is [], retry=0 2024-12-07T00:22:47,194 WARN [IPC Server handler 4 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-12-07T00:22:47,194 WARN [IPC Server handler 4 on default port 46157 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: 
expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-12-07T00:22:47,194 WARN [IPC Server handler 4 on default port 46157 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-12-07T00:22:47,196 DEBUG [RS-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:33501,DS-0926e91f-78b3-42a9-bab0-af07d02c8002,DISK] 2024-12-07T00:22:47,196 DEBUG [RS-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45899,DS-123a0df7-9413-4c73-8e7c-abc949f29669,DISK] 2024-12-07T00:22:47,199 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(841): New WAL /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/WALs/6e6711bf2d40,43281,1733530964846/6e6711bf2d40%2C43281%2C1733530964846.meta.1733530967173.meta 2024-12-07T00:22:47,200 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(925): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:38653:38653),(127.0.0.1/127.0.0.1:33269:33269)] 2024-12-07T00:22:47,200 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7285): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:22:47,201 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-12-07T00:22:47,203 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:22:47,204 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-12-07T00:22:47,205 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-12-07T00:22:47,207 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(436): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-12-07T00:22:47,217 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-12-07T00:22:47,218 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(894): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:47,218 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7327): checking encryption for 1588230740 2024-12-07T00:22:47,218 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7330): checking classloading for 1588230740 2024-12-07T00:22:47,224 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-12-07T00:22:47,226 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-12-07T00:22:47,226 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:47,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:22:47,227 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-12-07T00:22:47,229 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-12-07T00:22:47,229 DEBUG 
[StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:47,230 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:22:47,231 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-12-07T00:22:47,234 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-12-07T00:22:47,234 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:47,235 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-12-07T00:22:47,238 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740 2024-12-07T00:22:47,241 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740 2024-12-07T00:22:47,244 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (42.7 M)) instead. 
2024-12-07T00:22:47,247 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1085): writing seq id for 1588230740 2024-12-07T00:22:47,249 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1102): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63164559, jitterRate=-0.05877472460269928}}}, FlushLargeStoresPolicy{flushSizeLowerBound=44739242} 2024-12-07T00:22:47,254 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1001): Region open journal for 1588230740: 2024-12-07T00:22:47,262 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1733530967141 2024-12-07T00:22:47,277 DEBUG [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:meta,,1.1588230740 2024-12-07T00:22:47,277 INFO [RS_OPEN_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(164): Opened hbase:meta,,1.1588230740 2024-12-07T00:22:47,279 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:47,282 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 6e6711bf2d40,43281,1733530964846, state=OPEN 2024-12-07T00:22:47,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:22:47,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:22:47,284 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:22:47,284 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:22:47,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-12-07T00:22:47,285 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:22:47,285 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:22:47,285 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-12-07T00:22:47,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished 
subprocedure pid=3, resume processing ppid=2 2024-12-07T00:22:47,291 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=3, ppid=2, state=SUCCESS; OpenRegionProcedure 1588230740, server=6e6711bf2d40,43281,1733530964846 in 315 msec 2024-12-07T00:22:47,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=2, resume processing ppid=1 2024-12-07T00:22:47,300 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=2, ppid=1, state=SUCCESS; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 780 msec 2024-12-07T00:22:47,307 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=1, state=SUCCESS; InitMetaProcedure table=hbase:meta in 1.2470 sec 2024-12-07T00:22:47,307 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(1088): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1733530967307, completionTime=-1 2024-12-07T00:22:47,307 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.ServerManager(907): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-12-07T00:22:47,307 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] assignment.AssignmentManager(1747): Joining cluster... 2024-12-07T00:22:47,352 DEBUG [hconnection-0x26cd44eb-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:47,355 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34782, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:47,377 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] assignment.AssignmentManager(1759): Number of RegionServers=3 2024-12-07T00:22:47,378 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1733531027378 2024-12-07T00:22:47,378 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(81): ADDED pid=-1, state=WAITING_TIMEOUT; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1733531087378 2024-12-07T00:22:47,378 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] assignment.AssignmentManager(1766): Joined the cluster in 70 msec 2024-12-07T00:22:47,408 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] balancer.RegionLocationFinder(172): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:22:47,416 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,35025,1733530963762-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:47,416 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,35025,1733530963762-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:47,417 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,35025,1733530963762-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 
2024-12-07T00:22:47,418 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-6e6711bf2d40:35025, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:47,419 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:47,427 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.TableNamespaceManager(92): Namespace table not found. Creating... 2024-12-07T00:22:47,429 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(2425): Client=null/null create 'hbase:namespace', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T00:22:47,432 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] janitor.CatalogJanitor(179): 2024-12-07T00:22:47,438 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:namespace 2024-12-07T00:22:47,442 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:22:47,443 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:47,446 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:22:47,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741837_1013 (size=358) 2024-12-07T00:22:47,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741837_1013 (size=358) 2024-12-07T00:22:47,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741837_1013 (size=358) 2024-12-07T00:22:47,477 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => adc79d1e913b17fcc146c0c4daf0e7f9, NAME => 'hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:namespace', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '10', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:47,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741838_1014 (size=42) 2024-12-07T00:22:47,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33501 is added to blk_1073741838_1014 (size=42) 2024-12-07T00:22:47,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741838_1014 (size=42) 2024-12-07T00:22:47,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:47,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1681): Closing adc79d1e913b17fcc146c0c4daf0e7f9, disabling compactions & flushes 2024-12-07T00:22:47,498 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:22:47,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:22:47,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. after waiting 0 ms 2024-12-07T00:22:47,498 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:22:47,498 INFO [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1922): Closed hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:22:47,499 DEBUG [RegionOpenAndInit-hbase:namespace-pool-0 {}] regionserver.HRegion(1635): Region close journal for adc79d1e913b17fcc146c0c4daf0e7f9: 2024-12-07T00:22:47,501 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:22:47,510 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9.","families":{"info":[{"qualifier":"regioninfo","vlen":41,"tag":[],"timestamp":"1733530967503"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733530967503"}]},"ts":"1733530967503"} 2024-12-07T00:22:47,548 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
2024-12-07T00:22:47,552 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:22:47,556 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733530967552"}]},"ts":"1733530967552"} 2024-12-07T00:22:47,565 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLING in hbase:meta 2024-12-07T00:22:47,571 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:22:47,575 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:22:47,575 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:22:47,575 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:22:47,575 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:22:47,575 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:22:47,575 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:22:47,575 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:22:47,577 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=adc79d1e913b17fcc146c0c4daf0e7f9, ASSIGN}] 2024-12-07T00:22:47,582 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:namespace, region=adc79d1e913b17fcc146c0c4daf0e7f9, ASSIGN 2024-12-07T00:22:47,585 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:namespace, region=adc79d1e913b17fcc146c0c4daf0e7f9, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:22:47,737 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-12-07T00:22:47,738 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=adc79d1e913b17fcc146c0c4daf0e7f9, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:47,743 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE; OpenRegionProcedure adc79d1e913b17fcc146c0c4daf0e7f9, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:22:47,897 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:47,898 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:22:47,900 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51278, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:22:47,907 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(135): Open hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:22:47,908 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7285): Opening region: {ENCODED => adc79d1e913b17fcc146c0c4daf0e7f9, NAME => 'hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9.', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:22:47,908 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. service=AccessControlService 2024-12-07T00:22:47,909 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:22:47,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table namespace adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:22:47,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(894): Instantiated hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:47,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7327): checking encryption for adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:22:47,909 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7330): checking classloading for adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:22:47,912 INFO [StoreOpener-adc79d1e913b17fcc146c0c4daf0e7f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:22:47,914 INFO [StoreOpener-adc79d1e913b17fcc146c0c4daf0e7f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region adc79d1e913b17fcc146c0c4daf0e7f9 columnFamilyName info 2024-12-07T00:22:47,915 DEBUG [StoreOpener-adc79d1e913b17fcc146c0c4daf0e7f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:47,916 INFO [StoreOpener-adc79d1e913b17fcc146c0c4daf0e7f9-1 {}] regionserver.HStore(327): Store=adc79d1e913b17fcc146c0c4daf0e7f9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:22:47,917 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:22:47,918 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:22:47,922 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, 
pid=6}] regionserver.HRegion(1085): writing seq id for adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:22:47,927 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:22:47,928 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1102): Opened adc79d1e913b17fcc146c0c4daf0e7f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63188229, jitterRate=-0.058422014117240906}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:22:47,930 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1001): Region open journal for adc79d1e913b17fcc146c0c4daf0e7f9: 2024-12-07T00:22:47,932 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9., pid=6, masterSystemTime=1733530967897 2024-12-07T00:22:47,936 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:22:47,936 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(164): Opened hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 
2024-12-07T00:22:47,937 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=5 updating hbase:meta row=adc79d1e913b17fcc146c0c4daf0e7f9, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:47,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=6, resume processing ppid=5 2024-12-07T00:22:47,946 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=6, ppid=5, state=SUCCESS; OpenRegionProcedure adc79d1e913b17fcc146c0c4daf0e7f9, server=6e6711bf2d40,33095,1733530964767 in 198 msec 2024-12-07T00:22:47,950 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=5, resume processing ppid=4 2024-12-07T00:22:47,950 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=5, ppid=4, state=SUCCESS; TransitRegionStateProcedure table=hbase:namespace, region=adc79d1e913b17fcc146c0c4daf0e7f9, ASSIGN in 369 msec 2024-12-07T00:22:47,952 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:22:47,952 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:namespace","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733530967952"}]},"ts":"1733530967952"} 2024-12-07T00:22:47,955 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:namespace, state=ENABLED in hbase:meta 2024-12-07T00:22:47,959 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:namespace execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:22:47,963 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=4, state=SUCCESS; CreateTableProcedure table=hbase:namespace in 529 msec 2024-12-07T00:22:48,042 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKUtil(113): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/namespace 2024-12-07T00:22:48,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/namespace 2024-12-07T00:22:48,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:48,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:48,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:48,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:48,065 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] 
ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:48,068 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51284, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:48,079 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=7, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=default 2024-12-07T00:22:48,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741827_1003 (size=196) 2024-12-07T00:22:48,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741828_1004 (size=1189) 2024-12-07T00:22:48,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-07T00:22:48,113 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=7, state=SUCCESS; CreateNamespaceProcedure, namespace=default in 40 msec 2024-12-07T00:22:48,124 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=8, state=RUNNABLE:CREATE_NAMESPACE_PREPARE; CreateNamespaceProcedure, namespace=hbase 2024-12-07T00:22:48,138 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/namespace 2024-12-07T00:22:48,146 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=8, state=SUCCESS; CreateNamespaceProcedure, namespace=hbase in 19 msec 2024-12-07T00:22:48,166 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/default 2024-12-07T00:22:48,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/namespace/hbase 2024-12-07T00:22:48,169 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(1218): Master has completed initialization 3.211sec 2024-12-07T00:22:48,171 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-12-07T00:22:48,172 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-12-07T00:22:48,173 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-12-07T00:22:48,173 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-12-07T00:22:48,174 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-12-07T00:22:48,175 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,35025,1733530963762-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-12-07T00:22:48,175 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,35025,1733530963762-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-12-07T00:22:48,258 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2028d148 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1772819e 2024-12-07T00:22:48,267 WARN [Time-limited test {}] client.ZKConnectionRegistry(90): ZKConnectionRegistry is deprecated. See https://hbase.apache.org/book.html#client.rpcconnectionregistry 2024-12-07T00:22:48,278 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster$4(2389): Client=null/null create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-12-07T00:22:48,281 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1098): Stored pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=hbase:acl 2024-12-07T00:22:48,286 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:22:48,286 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:48,295 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] master.MasterRpcServices(713): Client=null/null procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 9 2024-12-07T00:22:48,297 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1322c480, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:48,297 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:22:48,307 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T00:22:48,307 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-12-07T00:22:48,307 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-12-07T00:22:48,334 DEBUG [hconnection-0x7be416da-shared-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:48,353 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45899 is added to blk_1073741839_1015 (size=349) 2024-12-07T00:22:48,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741839_1015 (size=349) 2024-12-07T00:22:48,354 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741839_1015 (size=349) 2024-12-07T00:22:48,359 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34784, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:48,362 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1199): Minicluster is up; activeMaster=6e6711bf2d40,35025,1733530963762 2024-12-07T00:22:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2790): Starting mini mapreduce cluster... 2024-12-07T00:22:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/test.cache.data in system properties and HBase conf 2024-12-07T00:22:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.tmp.dir in system properties and HBase conf 2024-12-07T00:22:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir in system properties and HBase conf 2024-12-07T00:22:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/mapreduce.cluster.local.dir in system properties and HBase conf 2024-12-07T00:22:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-12-07T00:22:48,363 INFO [Time-limited test {}] hbase.HBaseTestingUtility(811): read short circuit is OFF 2024-12-07T00:22:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-12-07T00:22:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-12-07T00:22:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-12-07T00:22:48,364 INFO 
[Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T00:22:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-12-07T00:22:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-12-07T00:22:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-12-07T00:22:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T00:22:48,364 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-12-07T00:22:48,365 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/nfs.dump.dir in system properties and HBase conf 2024-12-07T00:22:48,365 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir in system properties and HBase conf 2024-12-07T00:22:48,365 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/dfs.journalnode.edits.dir in system properties and HBase conf 2024-12-07T00:22:48,365 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-12-07T00:22:48,365 INFO [Time-limited test {}] hbase.HBaseTestingUtility(824): Setting fs.s3a.committer.staging.tmp.path to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-12-07T00:22:48,412 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T00:22:48,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741840_1016 (size=592039) 2024-12-07T00:22:48,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741840_1016 (size=592039) 2024-12-07T00:22:48,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741840_1016 (size=592039) 2024-12-07T00:22:48,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741841_1017 (size=1663647) 2024-12-07T00:22:48,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741841_1017 (size=1663647) 2024-12-07T00:22:48,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741841_1017 (size=1663647) 2024-12-07T00:22:48,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741829_1005 (size=34) 2024-12-07T00:22:48,613 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T00:22:48,806 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 37320884d80707cf3a5cc8fc74ed6256, NAME => 'hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:48,913 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T00:22:49,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741842_1018 (size=36) 2024-12-07T00:22:49,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741842_1018 (size=36) 2024-12-07T00:22:49,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741842_1018 (size=36) 2024-12-07T00:22:49,124 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(894): Instantiated hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:49,124 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 
{}] regionserver.HRegion(1681): Closing 37320884d80707cf3a5cc8fc74ed6256, disabling compactions & flushes 2024-12-07T00:22:49,124 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1703): Closing region hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:22:49,124 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:22:49,124 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. after waiting 0 ms 2024-12-07T00:22:49,124 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:22:49,124 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1922): Closed hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:22:49,124 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 37320884d80707cf3a5cc8fc74ed6256: 2024-12-07T00:22:49,127 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:22:49,128 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1733530969127"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733530969127"}]},"ts":"1733530969127"} 2024-12-07T00:22:49,132 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 1 regions to meta. 
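
For context on the pid=9 entries above: CreateTableProcedure is the server-side half of an ordinary createTable call. The following is a minimal client-side sketch, assuming a standard HBase 2.x client classpath with an hbase-site.xml on it; the class and table name are illustrative placeholders, not taken from the test, and the column-family attributes simply mirror the 'hbase:acl' descriptor logged earlier (NAME 'l', VERSIONS 1, IN_MEMORY true, BLOCKSIZE 8192).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateAclLikeTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family 'l' with the attributes shown in the create log entry:
      // VERSIONS => '1', IN_MEMORY => 'true', BLOCKSIZE => '8192'.
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("acl_like_demo"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("l"))
              .setMaxVersions(1)
              .setInMemory(true)
              .setBlocksize(8192)
              .build())
          .build();
      admin.createTable(desc); // returns once the server-side CreateTableProcedure completes
    }
  }
}
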
2024-12-07T00:22:49,134 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:22:49,135 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733530969134"}]},"ts":"1733530969134"} 2024-12-07T00:22:49,138 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-12-07T00:22:49,148 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:22:49,150 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:22:49,150 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:22:49,150 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:22:49,151 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:22:49,151 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:22:49,151 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:22:49,151 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:22:49,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=37320884d80707cf3a5cc8fc74ed6256, ASSIGN}] 2024-12-07T00:22:49,154 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=hbase:acl, region=37320884d80707cf3a5cc8fc74ed6256, ASSIGN 2024-12-07T00:22:49,156 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=10, ppid=9, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=hbase:acl, region=37320884d80707cf3a5cc8fc74ed6256, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:22:49,306 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-12-07T00:22:49,307 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=37320884d80707cf3a5cc8fc74ed6256, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:49,311 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=11, ppid=10, state=RUNNABLE; OpenRegionProcedure 37320884d80707cf3a5cc8fc74ed6256, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:22:49,413 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T00:22:49,512 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:49,591 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(135): Open hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 
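
The TransitRegionStateProcedure and balancer entries above record where the single hbase:acl region ends up. A small illustrative sketch (again assuming a standard HBase 2.x client; the class and method names are placeholders) of how a client can observe the same assignment outcome through RegionLocator:

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

public class WhereAreMyRegions {
  // Prints the server each region of a table was assigned to, i.e. the outcome of the
  // TransitRegionStateProcedure/balancer steps logged above.
  static void dump(Connection conn, String table) throws java.io.IOException {
    try (RegionLocator locator = conn.getRegionLocator(TableName.valueOf(table))) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getRegionNameAsString() + " -> " + loc.getServerName());
      }
    }
  }
}
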
2024-12-07T00:22:49,592 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7285): Opening region: {ENCODED => 37320884d80707cf3a5cc8fc74ed6256, NAME => 'hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256.', STARTKEY => '', ENDKEY => ''} 2024-12-07T00:22:49,592 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7999): Registered coprocessor service: region=hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. service=AccessControlService 2024-12-07T00:22:49,593 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:22:49,593 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 37320884d80707cf3a5cc8fc74ed6256 2024-12-07T00:22:49,593 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(894): Instantiated hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:49,593 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7327): checking encryption for 37320884d80707cf3a5cc8fc74ed6256 2024-12-07T00:22:49,593 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(7330): checking classloading for 37320884d80707cf3a5cc8fc74ed6256 2024-12-07T00:22:49,612 INFO [StoreOpener-37320884d80707cf3a5cc8fc74ed6256-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 37320884d80707cf3a5cc8fc74ed6256 2024-12-07T00:22:49,629 INFO [StoreOpener-37320884d80707cf3a5cc8fc74ed6256-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 37320884d80707cf3a5cc8fc74ed6256 columnFamilyName l 2024-12-07T00:22:49,629 DEBUG [StoreOpener-37320884d80707cf3a5cc8fc74ed6256-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:49,632 INFO [StoreOpener-37320884d80707cf3a5cc8fc74ed6256-1 {}] regionserver.HStore(327): Store=37320884d80707cf3a5cc8fc74ed6256/l, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:22:49,635 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256 2024-12-07T00:22:49,636 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256 2024-12-07T00:22:49,642 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1085): writing seq id for 37320884d80707cf3a5cc8fc74ed6256 2024-12-07T00:22:49,662 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:22:49,663 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1102): Opened 37320884d80707cf3a5cc8fc74ed6256; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73000117, jitterRate=0.08778651058673859}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:22:49,666 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegion(1001): Region open journal for 37320884d80707cf3a5cc8fc74ed6256: 2024-12-07T00:22:49,669 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2601): Post open deploy tasks for hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256., pid=11, masterSystemTime=1733530969512 2024-12-07T00:22:49,677 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] regionserver.HRegionServer(2628): Finished post open deploy task for hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:22:49,678 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=10 updating hbase:meta row=37320884d80707cf3a5cc8fc74ed6256, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:22:49,683 INFO [RS_OPEN_PRIORITY_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=11}] handler.AssignRegionHandler(164): Opened hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 
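
The region-open entries above show the AccessControlService coprocessor being registered on hbase:acl as the region opens. That happens because the test cluster runs with AccessController wired in as a system coprocessor; the metrics registered later for SecureTestUtil$MasterSyncObserver suggest the test sets this up through SecureTestUtil. A hedged sketch of the standard configuration keys that produce this behavior (illustrative, not copied from the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class SecureClusterConf {
  public static Configuration secureConf() {
    Configuration conf = HBaseConfiguration.create();
    String ac = "org.apache.hadoop.hbase.security.access.AccessController";
    // Load AccessController on the master, on every region server, and on every region,
    // which is what makes it show up in the CoprocessorHost lines during region open.
    conf.set("hbase.coprocessor.master.classes", ac);
    conf.set("hbase.coprocessor.regionserver.classes", ac);
    conf.set("hbase.coprocessor.region.classes", ac);
    // Turn authorization on so the coprocessor actually enforces the ACLs stored in hbase:acl.
    conf.setBoolean("hbase.security.authorization", true);
    return conf;
  }
}
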
2024-12-07T00:22:49,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=11, resume processing ppid=10 2024-12-07T00:22:49,692 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=11, ppid=10, state=SUCCESS; OpenRegionProcedure 37320884d80707cf3a5cc8fc74ed6256, server=6e6711bf2d40,33095,1733530964767 in 375 msec 2024-12-07T00:22:49,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=10, resume processing ppid=9 2024-12-07T00:22:49,698 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=10, ppid=9, state=SUCCESS; TransitRegionStateProcedure table=hbase:acl, region=37320884d80707cf3a5cc8fc74ed6256, ASSIGN in 541 msec 2024-12-07T00:22:49,702 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:22:49,702 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733530969702"}]},"ts":"1733530969702"} 2024-12-07T00:22:49,707 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-12-07T00:22:49,713 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=9, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:22:49,719 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=9, state=SUCCESS; CreateTableProcedure table=hbase:acl in 1.4350 sec 2024-12-07T00:22:50,414 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=9 2024-12-07T00:22:50,414 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: hbase:acl, procId: 9 completed 2024-12-07T00:22:50,425 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster {}] master.HMaster(1321): Balancer post startup initialization complete, took 0 seconds 2024-12-07T00:22:50,426 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-12-07T00:22:50,427 INFO [master/6e6711bf2d40:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=6e6711bf2d40,35025,1733530963762-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-12-07T00:22:50,730 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:50,812 WARN [Thread-390 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:51,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741831_1007 (size=1039) 2024-12-07T00:22:51,205 INFO [Thread-390 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:22:51,207 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-07T00:22:51,208 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:22:51,228 INFO [Thread-390 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:22:51,228 INFO [Thread-390 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:22:51,228 INFO [Thread-390 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T00:22:51,229 INFO [Thread-390 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@245d5c7b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:22:51,229 INFO [Thread-390 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@588bf098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-07T00:22:51,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:22:51,280 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:22:51,280 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T00:22:51,294 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:51,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7ff40a8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:22:51,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@66ade828{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-07T00:22:51,426 INFO [Thread-390 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-12-07T00:22:51,426 INFO [Thread-390 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-12-07T00:22:51,426 INFO [Thread-390 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-07T00:22:51,429 INFO [Thread-390 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-07T00:22:51,496 INFO [Thread-390 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:52,036 INFO [Thread-390 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:52,617 INFO [Thread-390 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:52,646 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@108a0d16{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir/jetty-localhost-46199-hadoop-yarn-common-3_4_1_jar-_-any-3074553497017269475/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-07T00:22:52,646 INFO [Thread-390 {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6dd43818{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir/jetty-localhost-34055-hadoop-yarn-common-3_4_1_jar-_-any-8363686002723398599/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-07T00:22:52,647 INFO [Thread-390 {}] server.AbstractConnector(333): Started ServerConnector@3d55d21f{HTTP/1.1, (http/1.1)}{localhost:34055} 2024-12-07T00:22:52,647 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@28028d61{HTTP/1.1, 
(http/1.1)}{localhost:46199} 2024-12-07T00:22:52,647 INFO [Thread-390 {}] server.Server(415): Started @17023ms 2024-12-07T00:22:52,647 INFO [Time-limited test {}] server.Server(415): Started @17023ms 2024-12-07T00:22:52,737 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:22:52,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741843_1019 (size=5) 2024-12-07T00:22:52,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741843_1019 (size=5) 2024-12-07T00:22:52,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741843_1019 (size=5) 2024-12-07T00:22:52,887 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-12-07T00:22:52,889 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:namespace' 2024-12-07T00:22:52,890 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-12-07T00:22:54,260 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-07T00:22:54,267 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:54,329 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-07T00:22:54,330 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:22:54,339 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:22:54,339 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:22:54,339 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-12-07T00:22:54,340 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:54,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6f349f2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:22:54,344 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@501a7a4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-07T00:22:54,382 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:22:54,382 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-12-07T00:22:54,383 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-12-07T00:22:54,383 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-12-07T00:22:54,386 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:22:54,386 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-12-07T00:22:54,387 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T00:22:54,387 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-12-07T00:22:54,388 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-07T00:22:54,388 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_namespace Metrics about Tables on a single HBase RegionServer 2024-12-07T00:22:54,389 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-07T00:22:54,390 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-12-07T00:22:54,391 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:22:54,391 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-12-07T00:22:54,391 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-07T00:22:54,391 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-12-07T00:22:54,391 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T00:22:54,392 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-12-07T00:22:54,423 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-07T00:22:54,424 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-07T00:22:54,424 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-07T00:22:54,424 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-07T00:22:54,433 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:54,474 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:54,703 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:54,717 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5cc9ed7e{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir/jetty-localhost-39491-hadoop-yarn-common-3_4_1_jar-_-any-347339651731174849/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-07T00:22:54,718 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@66d0c4b3{HTTP/1.1, (http/1.1)}{localhost:39491} 2024-12-07T00:22:54,718 INFO [Time-limited test {}] server.Server(415): Started @19094ms 2024-12-07T00:22:54,938 WARN 
[Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-12-07T00:22:54,941 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:54,953 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-12-07T00:22:54,954 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-12-07T00:22:54,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-12-07T00:22:54,960 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-12-07T00:22:54,960 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-12-07T00:22:54,961 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-12-07T00:22:54,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@37e3940c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,AVAILABLE} 2024-12-07T00:22:54,963 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7be3ffc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-12-07T00:22:55,023 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-12-07T00:22:55,023 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-12-07T00:22:55,023 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-12-07T00:22:55,024 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-12-07T00:22:55,035 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:55,047 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:55,240 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding 
org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-12-07T00:22:55,248 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39ff7dfc{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/java.io.tmpdir/jetty-localhost-39759-hadoop-yarn-common-3_4_1_jar-_-any-5270322132781754748/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-07T00:22:55,264 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58fcc341{HTTP/1.1, (http/1.1)}{localhost:39759} 2024-12-07T00:22:55,264 INFO [Time-limited test {}] server.Server(415): Started @19640ms 2024-12-07T00:22:55,338 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2825): Mini mapreduce cluster started 2024-12-07T00:22:55,340 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:22:55,371 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=719, OpenFileDescriptor=765, MaxFileDescriptor=1048576, SystemLoadAverage=523, ProcessCount=11, AvailableMemoryMB=5941 2024-12-07T00:22:55,371 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=719 is superior to 500 2024-12-07T00:22:55,383 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-12-07T00:22:55,387 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54892, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-12-07T00:22:55,392 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:22:55,394 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithTargetName 2024-12-07T00:22:55,396 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:22:55,397 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 12 2024-12-07T00:22:55,397 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:55,398 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; 
CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:22:55,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T00:22:55,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741844_1020 (size=406) 2024-12-07T00:22:55,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741844_1020 (size=406) 2024-12-07T00:22:55,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741844_1020 (size=406) 2024-12-07T00:22:55,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T00:22:55,703 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T00:22:55,838 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 7eedb80957f4779681b5aac1294bebb9, NAME => 'testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:55,839 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => f26d3c291cea0bc39ee497b79e25df36, NAME => 'testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:55,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741846_1022 (size=67) 2024-12-07T00:22:55,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741846_1022 (size=67) 2024-12-07T00:22:55,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741846_1022 (size=67) 2024-12-07T00:22:55,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(894): Instantiated 
testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:55,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1681): Closing 7eedb80957f4779681b5aac1294bebb9, disabling compactions & flushes 2024-12-07T00:22:55,900 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:55,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:55,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. after waiting 0 ms 2024-12-07T00:22:55,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:55,900 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:55,900 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1635): Region close journal for 7eedb80957f4779681b5aac1294bebb9: 2024-12-07T00:22:55,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741845_1021 (size=67) 2024-12-07T00:22:55,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741845_1021 (size=67) 2024-12-07T00:22:55,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741845_1021 (size=67) 2024-12-07T00:22:55,913 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:55,914 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1681): Closing f26d3c291cea0bc39ee497b79e25df36, disabling compactions & flushes 2024-12-07T00:22:55,914 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:55,914 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:55,914 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 
after waiting 0 ms 2024-12-07T00:22:55,914 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:55,914 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:55,914 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1635): Region close journal for f26d3c291cea0bc39ee497b79e25df36: 2024-12-07T00:22:55,916 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:22:55,917 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733530975916"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733530975916"}]},"ts":"1733530975916"} 2024-12-07T00:22:55,917 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1733530975916"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733530975916"}]},"ts":"1733530975916"} 2024-12-07T00:22:55,953 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:22:55,955 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:22:55,955 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733530975955"}]},"ts":"1733530975955"} 2024-12-07T00:22:55,958 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-12-07T00:22:55,963 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:22:55,966 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:22:55,966 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:22:55,966 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:22:55,966 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:22:55,966 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:22:55,966 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:22:55,966 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:22:55,967 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, 
region=7eedb80957f4779681b5aac1294bebb9, ASSIGN}, {pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f26d3c291cea0bc39ee497b79e25df36, ASSIGN}] 2024-12-07T00:22:55,970 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7eedb80957f4779681b5aac1294bebb9, ASSIGN 2024-12-07T00:22:55,971 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f26d3c291cea0bc39ee497b79e25df36, ASSIGN 2024-12-07T00:22:55,974 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=13, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7eedb80957f4779681b5aac1294bebb9, ASSIGN; state=OFFLINE, location=6e6711bf2d40,43281,1733530964846; forceNewPlan=false, retain=false 2024-12-07T00:22:55,974 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=14, ppid=12, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f26d3c291cea0bc39ee497b79e25df36, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:22:56,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T00:22:56,125 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 
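
testtb-testExportWithTargetName is created with two regions split at row key '1' (STARTKEY '' to '1', then '1' to ''), which is what the two RegionOpenAndInit entries and the two ASSIGN subprocedures above reflect. A minimal sketch of creating a pre-split table like that through the Admin API, under the same assumptions as the earlier snippet (placeholder names, standard HBase 2.x client):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreatePreSplitTable {
  // Creates a two-region table covering ['', '1') and ['1', ''), mirroring the split layout above.
  static void create(Admin admin) throws java.io.IOException {
    TableDescriptor desc = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("presplit_demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
    admin.createTable(desc, splitKeys); // one split key yields two regions
  }
}
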
2024-12-07T00:22:56,126 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=7eedb80957f4779681b5aac1294bebb9, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:56,126 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=f26d3c291cea0bc39ee497b79e25df36, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:56,129 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=15, ppid=13, state=RUNNABLE; OpenRegionProcedure 7eedb80957f4779681b5aac1294bebb9, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:22:56,131 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=16, ppid=14, state=RUNNABLE; OpenRegionProcedure f26d3c291cea0bc39ee497b79e25df36, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:22:56,283 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:56,286 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:56,286 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:22:56,306 INFO [RS-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60126, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:22:56,315 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:56,315 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7285): Opening region: {ENCODED => f26d3c291cea0bc39ee497b79e25df36, NAME => 'testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:22:56,316 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. service=AccessControlService 2024-12-07T00:22:56,316 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:22:56,316 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,316 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:56,317 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7327): checking encryption for f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,317 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(7330): checking classloading for f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,320 INFO [StoreOpener-f26d3c291cea0bc39ee497b79e25df36-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,321 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(135): Open testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:56,321 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7285): Opening region: {ENCODED => 7eedb80957f4779681b5aac1294bebb9, NAME => 'testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:22:56,321 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. service=AccessControlService 2024-12-07T00:22:56,322 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:22:56,322 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,322 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(894): Instantiated testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:22:56,322 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7327): checking encryption for 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,322 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(7330): checking classloading for 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,324 INFO [StoreOpener-f26d3c291cea0bc39ee497b79e25df36-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f26d3c291cea0bc39ee497b79e25df36 columnFamilyName cf 2024-12-07T00:22:56,324 DEBUG [StoreOpener-f26d3c291cea0bc39ee497b79e25df36-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:56,325 INFO [StoreOpener-f26d3c291cea0bc39ee497b79e25df36-1 {}] regionserver.HStore(327): Store=f26d3c291cea0bc39ee497b79e25df36/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:22:56,327 INFO [StoreOpener-7eedb80957f4779681b5aac1294bebb9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,328 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,328 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,330 INFO [StoreOpener-7eedb80957f4779681b5aac1294bebb9-1 {}] compactions.CompactionConfiguration(181): size 
[minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7eedb80957f4779681b5aac1294bebb9 columnFamilyName cf 2024-12-07T00:22:56,330 DEBUG [StoreOpener-7eedb80957f4779681b5aac1294bebb9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:22:56,331 INFO [StoreOpener-7eedb80957f4779681b5aac1294bebb9-1 {}] regionserver.HStore(327): Store=7eedb80957f4779681b5aac1294bebb9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:22:56,333 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,333 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1085): writing seq id for f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,333 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,338 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1085): writing seq id for 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,340 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:22:56,341 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1102): Opened f26d3c291cea0bc39ee497b79e25df36; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59658781, jitterRate=-0.11101488769054413}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:22:56,342 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegion(1001): Region open journal for f26d3c291cea0bc39ee497b79e25df36: 2024-12-07T00:22:56,344 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36., pid=16, 
masterSystemTime=1733530976286 2024-12-07T00:22:56,347 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:56,347 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=16}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:56,348 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=14 updating hbase:meta row=f26d3c291cea0bc39ee497b79e25df36, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:56,350 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=6e6711bf2d40,36059,1733530964705, table=testtb-testExportWithTargetName, region=f26d3c291cea0bc39ee497b79e25df36. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-12-07T00:22:56,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=16, resume processing ppid=14 2024-12-07T00:22:56,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=16, ppid=14, state=SUCCESS; OpenRegionProcedure f26d3c291cea0bc39ee497b79e25df36, server=6e6711bf2d40,36059,1733530964705 in 220 msec 2024-12-07T00:22:56,357 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:22:56,359 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1102): Opened 7eedb80957f4779681b5aac1294bebb9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58968810, jitterRate=-0.12129625678062439}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:22:56,359 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegion(1001): Region open journal for 7eedb80957f4779681b5aac1294bebb9: 2024-12-07T00:22:56,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=14, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f26d3c291cea0bc39ee497b79e25df36, ASSIGN in 389 msec 2024-12-07T00:22:56,360 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9., pid=15, masterSystemTime=1733530976283 2024-12-07T00:22:56,364 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 
2024-12-07T00:22:56,364 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=15}] handler.AssignRegionHandler(164): Opened testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:56,366 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=13 updating hbase:meta row=7eedb80957f4779681b5aac1294bebb9, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:56,375 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=15, resume processing ppid=13 2024-12-07T00:22:56,376 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=15, ppid=13, state=SUCCESS; OpenRegionProcedure 7eedb80957f4779681b5aac1294bebb9, server=6e6711bf2d40,43281,1733530964846 in 241 msec 2024-12-07T00:22:56,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=13, resume processing ppid=12 2024-12-07T00:22:56,381 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=13, ppid=12, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7eedb80957f4779681b5aac1294bebb9, ASSIGN in 408 msec 2024-12-07T00:22:56,383 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:22:56,383 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733530976383"}]},"ts":"1733530976383"} 2024-12-07T00:22:56,386 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-12-07T00:22:56,391 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=12, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:22:56,397 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-12-07T00:22:56,418 DEBUG [hconnection-0x2c5ba480-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:56,420 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57708, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-12-07T00:22:56,429 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-07T00:22:56,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-07T00:22:56,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-07T00:22:56,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-07T00:22:56,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:56,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:56,457 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:56,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-12-07T00:22:56,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:22:56,467 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-07T00:22:56,468 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-07T00:22:56,472 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=12, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithTargetName in 1.0760 sec 2024-12-07T00:22:56,477 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-07T00:22:56,480 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-12-07T00:22:56,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=12 2024-12-07T00:22:56,508 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName, procId: 12 completed 2024-12-07T00:22:56,508 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithTargetName get assigned. Timeout = 60000ms 2024-12-07T00:22:56,509 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:22:56,516 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithTargetName assigned to meta. Checking AM states. 
2024-12-07T00:22:56,517 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:22:56,517 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithTargetName assigned. 2024-12-07T00:22:56,533 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-07T00:22:56,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733530976533 (current time:1733530976533). 2024-12-07T00:22:56,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:22:56,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-07T00:22:56,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:22:56,535 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2ca3a64b to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1562076b 2024-12-07T00:22:56,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5205793a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:56,540 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:56,542 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57712, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:56,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2ca3a64b to 127.0.0.1:60354 2024-12-07T00:22:56,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:22:56,548 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b76693a to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@939b39b 2024-12-07T00:22:56,552 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@391d0db6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:56,557 DEBUG [hconnection-0x700d92e9-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for 
service=ClientService, sasl=false 2024-12-07T00:22:56,559 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57726, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:56,561 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:56,563 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53246, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:56,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b76693a to 127.0.0.1:60354 2024-12-07T00:22:56,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:22:56,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-07T00:22:56,578 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:22:56,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=17, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-07T00:22:56,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-07T00:22:56,586 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:22:56,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-07T00:22:56,592 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:22:56,604 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:22:56,620 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741847_1023 (size=167) 2024-12-07T00:22:56,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741847_1023 (size=167) 2024-12-07T00:22:56,621 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741847_1023 (size=167) 2024-12-07T00:22:56,623 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:22:56,627 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 7eedb80957f4779681b5aac1294bebb9}, {pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure f26d3c291cea0bc39ee497b79e25df36}] 2024-12-07T00:22:56,632 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,632 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-07T00:22:56,789 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:56,789 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:56,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=19 2024-12-07T00:22:56,790 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=18 2024-12-07T00:22:56,791 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:56,791 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:56,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.HRegion(2538): Flush status journal for f26d3c291cea0bc39ee497b79e25df36: 2024-12-07T00:22:56,793 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.HRegion(2538): Flush status journal for 7eedb80957f4779681b5aac1294bebb9: 2024-12-07T00:22:56,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. for emptySnaptb0-testExportWithTargetName completed. 
2024-12-07T00:22:56,794 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. for emptySnaptb0-testExportWithTargetName completed. 2024-12-07T00:22:56,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-07T00:22:56,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-12-07T00:22:56,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:22:56,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:22:56,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:22:56,801 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:22:56,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741848_1024 (size=70) 2024-12-07T00:22:56,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741848_1024 (size=70) 2024-12-07T00:22:56,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741848_1024 (size=70) 2024-12-07T00:22:56,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741849_1025 (size=70) 2024-12-07T00:22:56,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741849_1025 (size=70) 2024-12-07T00:22:56,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741849_1025 (size=70) 2024-12-07T00:22:56,821 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:56,823 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 
2024-12-07T00:22:56,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=18}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=18 2024-12-07T00:22:56,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=19}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=19 2024-12-07T00:22:56,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=18 2024-12-07T00:22:56,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=19 2024-12-07T00:22:56,826 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,826 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,826 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=19, ppid=17, state=RUNNABLE; SnapshotRegionProcedure f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:56,827 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=18, ppid=17, state=RUNNABLE; SnapshotRegionProcedure 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:56,830 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=19, ppid=17, state=SUCCESS; SnapshotRegionProcedure f26d3c291cea0bc39ee497b79e25df36 in 201 msec 2024-12-07T00:22:56,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=18, resume processing ppid=17 2024-12-07T00:22:56,832 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=18, ppid=17, state=SUCCESS; SnapshotRegionProcedure 7eedb80957f4779681b5aac1294bebb9 in 201 msec 2024-12-07T00:22:56,832 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:22:56,834 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:22:56,836 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:22:56,837 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-12-07T00:22:56,839 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under 
directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-12-07T00:22:56,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741850_1026 (size=549) 2024-12-07T00:22:56,864 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741850_1026 (size=549) 2024-12-07T00:22:56,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741850_1026 (size=549) 2024-12-07T00:22:56,871 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:22:56,882 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:22:56,883 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-12-07T00:22:56,886 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=17, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:22:56,887 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 17 2024-12-07T00:22:56,889 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=17, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=17, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 306 msec 2024-12-07T00:22:56,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=17 2024-12-07T00:22:56,890 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 17 completed 2024-12-07T00:22:56,918 DEBUG [htable-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:56,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-07T00:22:56,925 INFO [RS-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60142, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:56,928 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:22:56,939 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithTargetName 2024-12-07T00:22:56,940 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:56,941 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:22:56,993 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-07T00:22:56,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733530976993 (current time:1733530976993). 2024-12-07T00:22:56,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:22:56,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-12-07T00:22:56,993 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:22:56,997 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x09498f20 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@778813bb 2024-12-07T00:22:57,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f994fd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:57,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:57,023 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57732, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:57,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x09498f20 to 127.0.0.1:60354 2024-12-07T00:22:57,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:22:57,028 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x61be3277 to 127.0.0.1:60354 with 
session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@561cf030 2024-12-07T00:22:57,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@230ee2da, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:22:57,051 DEBUG [hconnection-0x5b77f6eb-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:57,054 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57748, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:57,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:22:57,058 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53258, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:22:57,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x61be3277 to 127.0.0.1:60354 2024-12-07T00:22:57,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:22:57,060 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-12-07T00:22:57,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-07T00:22:57,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=20, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-12-07T00:22:57,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-07T00:22:57,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T00:22:57,072 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:22:57,074 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:22:57,079 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:22:57,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741851_1027 (size=162) 2024-12-07T00:22:57,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741851_1027 (size=162) 2024-12-07T00:22:57,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741851_1027 (size=162) 2024-12-07T00:22:57,130 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:22:57,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 7eedb80957f4779681b5aac1294bebb9}, {pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure f26d3c291cea0bc39ee497b79e25df36}] 2024-12-07T00:22:57,133 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:57,133 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:57,169 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=20 2024-12-07T00:22:57,286 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:22:57,286 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:22:57,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=21 2024-12-07T00:22:57,287 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=22 2024-12-07T00:22:57,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:22:57,288 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:57,288 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2837): Flushing 7eedb80957f4779681b5aac1294bebb9 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-07T00:22:57,288 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2837): Flushing f26d3c291cea0bc39ee497b79e25df36 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-07T00:22:57,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T00:22:57,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/.tmp/cf/e378417012fa48989ee3db7d7d3cb77d is 71, key is 104731a53bc5e510c15b7e564ab8c1f5/cf:q/1733530976928/Put/seqid=0 2024-12-07T00:22:57,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/.tmp/cf/da39cec0da0c4afea945059f9cd49160 is 71, key is 02247ef3d52f6858e53cb76664ffbba7/cf:q/1733530976922/Put/seqid=0 2024-12-07T00:22:57,427 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741852_1028 (size=8324) 2024-12-07T00:22:57,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741852_1028 (size=8324) 2024-12-07T00:22:57,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741852_1028 (size=8324) 2024-12-07T00:22:57,433 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/.tmp/cf/e378417012fa48989ee3db7d7d3cb77d 2024-12-07T00:22:57,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741853_1029 (size=5288) 2024-12-07T00:22:57,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741853_1029 (size=5288) 2024-12-07T00:22:57,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741853_1029 (size=5288) 2024-12-07T00:22:57,439 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/.tmp/cf/da39cec0da0c4afea945059f9cd49160 2024-12-07T00:22:57,516 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/.tmp/cf/e378417012fa48989ee3db7d7d3cb77d as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/cf/e378417012fa48989ee3db7d7d3cb77d 2024-12-07T00:22:57,517 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/.tmp/cf/da39cec0da0c4afea945059f9cd49160 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/cf/da39cec0da0c4afea945059f9cd49160 2024-12-07T00:22:57,530 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/cf/e378417012fa48989ee3db7d7d3cb77d, entries=47, sequenceid=6, filesize=8.1 K 2024-12-07T00:22:57,534 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for f26d3c291cea0bc39ee497b79e25df36 in 245ms, sequenceid=6, compaction requested=false 2024-12-07T00:22:57,534 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-12-07T00:22:57,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.HRegion(2538): Flush status journal for f26d3c291cea0bc39ee497b79e25df36: 2024-12-07T00:22:57,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. for snaptb0-testExportWithTargetName completed. 2024-12-07T00:22:57,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-07T00:22:57,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:22:57,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/cf/e378417012fa48989ee3db7d7d3cb77d] hfiles 2024-12-07T00:22:57,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/cf/e378417012fa48989ee3db7d7d3cb77d for snapshot=snaptb0-testExportWithTargetName 2024-12-07T00:22:57,536 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/cf/da39cec0da0c4afea945059f9cd49160, entries=3, sequenceid=6, filesize=5.2 K 2024-12-07T00:22:57,537 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 7eedb80957f4779681b5aac1294bebb9 in 249ms, sequenceid=6, compaction requested=false 2024-12-07T00:22:57,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.HRegion(2538): Flush status journal for 7eedb80957f4779681b5aac1294bebb9: 2024-12-07T00:22:57,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. for snaptb0-testExportWithTargetName completed. 2024-12-07T00:22:57,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-12-07T00:22:57,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:22:57,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/cf/da39cec0da0c4afea945059f9cd49160] hfiles 2024-12-07T00:22:57,538 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/cf/da39cec0da0c4afea945059f9cd49160 for snapshot=snaptb0-testExportWithTargetName 2024-12-07T00:22:57,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741854_1030 (size=109) 2024-12-07T00:22:57,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741854_1030 (size=109) 2024-12-07T00:22:57,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741854_1030 (size=109) 2024-12-07T00:22:57,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 
2024-12-07T00:22:57,584 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=21}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=21 2024-12-07T00:22:57,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=21 2024-12-07T00:22:57,585 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:57,585 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=21, ppid=20, state=RUNNABLE; SnapshotRegionProcedure 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:22:57,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741855_1031 (size=109) 2024-12-07T00:22:57,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741855_1031 (size=109) 2024-12-07T00:22:57,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741855_1031 (size=109) 2024-12-07T00:22:57,594 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=21, ppid=20, state=SUCCESS; SnapshotRegionProcedure 7eedb80957f4779681b5aac1294bebb9 in 456 msec 2024-12-07T00:22:57,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:22:57,596 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=22}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=22 2024-12-07T00:22:57,596 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=22 2024-12-07T00:22:57,597 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:57,597 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=22, ppid=20, state=RUNNABLE; SnapshotRegionProcedure f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:22:57,601 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=22, resume processing ppid=20 2024-12-07T00:22:57,601 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=22, ppid=20, state=SUCCESS; SnapshotRegionProcedure f26d3c291cea0bc39ee497b79e25df36 in 468 msec 2024-12-07T00:22:57,601 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:22:57,604 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 
execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:22:57,605 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:22:57,605 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-12-07T00:22:57,607 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-12-07T00:22:57,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741856_1032 (size=627) 2024-12-07T00:22:57,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741856_1032 (size=627) 2024-12-07T00:22:57,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741856_1032 (size=627) 2024-12-07T00:22:57,639 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:22:57,655 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:22:57,656 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-07T00:22:57,664 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=20, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:22:57,664 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 20 2024-12-07T00:22:57,671 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=20, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=20, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 603 msec 2024-12-07T00:22:57,673 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=20 2024-12-07T00:22:57,674 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName, procId: 20 completed 2024-12-07T00:22:57,674 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674 2024-12-07T00:22:57,674 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46157, tgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674, rawTgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:57,739 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:22:57,743 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-07T00:22:57,751 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:22:57,786 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674/.hbase-snapshot/.tmp/testExportWithTargetName 2024-12-07T00:22:57,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741857_1033 (size=162) 2024-12-07T00:22:57,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741857_1033 (size=162) 2024-12-07T00:22:57,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741857_1033 (size=162) 2024-12-07T00:22:57,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741858_1034 (size=627) 2024-12-07T00:22:57,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741858_1034 (size=627) 2024-12-07T00:22:57,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741858_1034 (size=627) 2024-12-07T00:22:57,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741859_1035 (size=154) 2024-12-07T00:22:57,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741859_1035 (size=154) 
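(Context, not part of the log: the SNAPSHOT operation reported completed above for procId 20 is the kind of FLUSH snapshot a client requests through the public Admin API. A minimal sketch, assuming an hbase-site.xml on the classpath that reaches this cluster; the snapshot and table names mirror the ones in the log.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Assumption: the Configuration resolves to the (mini-)cluster seen in this log.
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Requests a FLUSH-type snapshot; the master executes it as a SnapshotProcedure
          // with per-region SnapshotRegionProcedure children, as logged above (pid=20/21/22).
          admin.snapshot("snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }

(The call returns once the procedure finishes, which is why the client sees "procId: 20 completed" before the export begins.)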
2024-12-07T00:22:57,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741859_1035 (size=154) 2024-12-07T00:22:57,897 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:57,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:57,898 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:57,899 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,341 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-16121053314330454112.jar 2024-12-07T00:22:59,342 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,342 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,447 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-7229809637585300510.jar 2024-12-07T00:22:59,448 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,449 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,450 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,451 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,462 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T00:22:59,463 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:22:59,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:22:59,464 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:22:59,465 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:22:59,466 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:22:59,466 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:22:59,467 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:22:59,467 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:22:59,472 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:22:59,473 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For 
class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:22:59,473 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:22:59,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:22:59,477 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:22:59,478 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:22:59,478 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:22:59,479 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:22:59,479 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:22:59,480 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:22:59,480 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:22:59,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741860_1036 (size=127628) 2024-12-07T00:22:59,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741860_1036 (size=127628) 2024-12-07T00:22:59,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741860_1036 (size=127628) 2024-12-07T00:22:59,687 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741861_1037 (size=2172101) 2024-12-07T00:22:59,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741861_1037 (size=2172101) 2024-12-07T00:22:59,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741861_1037 (size=2172101) 2024-12-07T00:22:59,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741862_1038 (size=213228) 2024-12-07T00:22:59,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741862_1038 (size=213228) 2024-12-07T00:22:59,714 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741862_1038 (size=213228) 2024-12-07T00:22:59,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741863_1039 (size=1877034) 2024-12-07T00:22:59,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741863_1039 (size=1877034) 2024-12-07T00:22:59,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741863_1039 (size=1877034) 2024-12-07T00:22:59,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741864_1040 (size=533455) 2024-12-07T00:22:59,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741864_1040 (size=533455) 2024-12-07T00:22:59,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741864_1040 (size=533455) 2024-12-07T00:22:59,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741865_1041 (size=7280644) 2024-12-07T00:22:59,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741865_1041 (size=7280644) 2024-12-07T00:22:59,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741865_1041 (size=7280644) 2024-12-07T00:22:59,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741866_1042 (size=4188619) 2024-12-07T00:22:59,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741866_1042 (size=4188619) 2024-12-07T00:22:59,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741866_1042 (size=4188619) 2024-12-07T00:22:59,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741867_1043 (size=20406) 2024-12-07T00:22:59,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741867_1043 (size=20406) 
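(Context, not part of the log: the long run of "For class X, using jar Y" messages above, and the jar-sized blocks being replicated here, come from dependency-jar resolution while the export sets up its MapReduce job. A rough illustration of that mechanism, assuming a Job built from the same Configuration; this is a sketch, not the test's exact code.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Resolves the jar that provides each required class (HBase modules, ZooKeeper,
        // protobuf, OpenTelemetry, ...) and adds it to the job's distributed cache;
        // job submission then uploads those jars into HDFS, producing the block reports above.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }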
2024-12-07T00:22:59,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741867_1043 (size=20406) 2024-12-07T00:22:59,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741868_1044 (size=75495) 2024-12-07T00:22:59,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741868_1044 (size=75495) 2024-12-07T00:22:59,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741868_1044 (size=75495) 2024-12-07T00:22:59,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741869_1045 (size=45609) 2024-12-07T00:22:59,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741869_1045 (size=45609) 2024-12-07T00:22:59,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741869_1045 (size=45609) 2024-12-07T00:22:59,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741870_1046 (size=110084) 2024-12-07T00:22:59,996 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741870_1046 (size=110084) 2024-12-07T00:22:59,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741870_1046 (size=110084) 2024-12-07T00:23:00,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741871_1047 (size=1323991) 2024-12-07T00:23:00,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741871_1047 (size=1323991) 2024-12-07T00:23:00,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741871_1047 (size=1323991) 2024-12-07T00:23:00,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741872_1048 (size=23076) 2024-12-07T00:23:00,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741872_1048 (size=23076) 2024-12-07T00:23:00,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741872_1048 (size=23076) 2024-12-07T00:23:00,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741873_1049 (size=451756) 2024-12-07T00:23:00,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741873_1049 (size=451756) 2024-12-07T00:23:00,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741873_1049 (size=451756) 2024-12-07T00:23:00,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741874_1050 (size=126803) 
2024-12-07T00:23:00,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741874_1050 (size=126803) 2024-12-07T00:23:00,584 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741874_1050 (size=126803) 2024-12-07T00:23:00,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741875_1051 (size=322274) 2024-12-07T00:23:00,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741875_1051 (size=322274) 2024-12-07T00:23:00,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741875_1051 (size=322274) 2024-12-07T00:23:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741876_1052 (size=1832290) 2024-12-07T00:23:01,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741876_1052 (size=1832290) 2024-12-07T00:23:01,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741876_1052 (size=1832290) 2024-12-07T00:23:01,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741877_1053 (size=30081) 2024-12-07T00:23:01,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741877_1053 (size=30081) 2024-12-07T00:23:01,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741877_1053 (size=30081) 2024-12-07T00:23:01,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741878_1054 (size=53616) 2024-12-07T00:23:01,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741878_1054 (size=53616) 2024-12-07T00:23:01,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741878_1054 (size=53616) 2024-12-07T00:23:01,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741879_1055 (size=29229) 2024-12-07T00:23:01,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741879_1055 (size=29229) 2024-12-07T00:23:01,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741879_1055 (size=29229) 2024-12-07T00:23:01,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741880_1056 (size=169089) 2024-12-07T00:23:01,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741880_1056 (size=169089) 2024-12-07T00:23:01,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741880_1056 (size=169089) 
2024-12-07T00:23:01,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741881_1057 (size=5175431) 2024-12-07T00:23:01,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741881_1057 (size=5175431) 2024-12-07T00:23:01,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741881_1057 (size=5175431) 2024-12-07T00:23:01,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741882_1058 (size=136454) 2024-12-07T00:23:01,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741882_1058 (size=136454) 2024-12-07T00:23:01,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741882_1058 (size=136454) 2024-12-07T00:23:01,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741883_1059 (size=6350154) 2024-12-07T00:23:01,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741883_1059 (size=6350154) 2024-12-07T00:23:01,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741883_1059 (size=6350154) 2024-12-07T00:23:01,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741884_1060 (size=907851) 2024-12-07T00:23:01,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741884_1060 (size=907851) 2024-12-07T00:23:01,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741884_1060 (size=907851) 2024-12-07T00:23:01,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741885_1061 (size=3317408) 2024-12-07T00:23:01,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741885_1061 (size=3317408) 2024-12-07T00:23:01,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741885_1061 (size=3317408) 2024-12-07T00:23:01,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741886_1062 (size=503880) 2024-12-07T00:23:01,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741886_1062 (size=503880) 2024-12-07T00:23:01,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741886_1062 (size=503880) 2024-12-07T00:23:01,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741887_1063 (size=4695811) 2024-12-07T00:23:01,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to 
blk_1073741887_1063 (size=4695811) 2024-12-07T00:23:01,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741887_1063 (size=4695811) 2024-12-07T00:23:01,551 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-07T00:23:01,560 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-12-07T00:23:01,573 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T00:23:01,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741888_1064 (size=342) 2024-12-07T00:23:01,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741888_1064 (size=342) 2024-12-07T00:23:01,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741888_1064 (size=342) 2024-12-07T00:23:01,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741889_1065 (size=15) 2024-12-07T00:23:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741889_1065 (size=15) 2024-12-07T00:23:01,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741889_1065 (size=15) 2024-12-07T00:23:01,821 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:23:01,834 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741890_1066 (size=304888) 2024-12-07T00:23:01,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741890_1066 (size=304888) 2024-12-07T00:23:01,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741890_1066 (size=304888) 2024-12-07T00:23:02,403 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:23:02,403 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T00:23:02,967 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0001_000001 (auth:SIMPLE) from 127.0.0.1:41868 2024-12-07T00:23:04,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-07T00:23:04,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-12-07T00:23:11,006 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0001_000001 (auth:SIMPLE) from 127.0.0.1:32814 2024-12-07T00:23:11,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741891_1067 (size=350562) 2024-12-07T00:23:11,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741891_1067 (size=350562) 2024-12-07T00:23:11,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741891_1067 (size=350562) 2024-12-07T00:23:12,673 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:23:13,440 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0001_000001 (auth:SIMPLE) from 127.0.0.1:41030 2024-12-07T00:23:19,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741892_1068 (size=8324) 2024-12-07T00:23:19,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741892_1068 (size=8324) 2024-12-07T00:23:19,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741892_1068 (size=8324) 2024-12-07T00:23:19,103 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:23:19,104 INFO [RS-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38358, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:23:19,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741893_1069 (size=5288) 2024-12-07T00:23:19,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741893_1069 (size=5288) 2024-12-07T00:23:19,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741893_1069 (size=5288) 2024-12-07T00:23:19,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741894_1070 (size=17419) 2024-12-07T00:23:19,272 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741894_1070 (size=17419) 2024-12-07T00:23:19,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741894_1070 (size=17419) 2024-12-07T00:23:19,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741895_1071 (size=464) 2024-12-07T00:23:19,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741895_1071 (size=464) 2024-12-07T00:23:19,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741895_1071 (size=464) 2024-12-07T00:23:19,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741896_1072 (size=17419) 2024-12-07T00:23:19,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741896_1072 (size=17419) 2024-12-07T00:23:19,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741896_1072 (size=17419) 2024-12-07T00:23:19,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0001/container_1733530972728_0001_01_000002/launch_container.sh] 2024-12-07T00:23:19,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0001/container_1733530972728_0001_01_000002/container_tokens] 2024-12-07T00:23:19,402 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0001/container_1733530972728_0001_01_000002/sysfs] 2024-12-07T00:23:19,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741897_1073 (size=350562) 2024-12-07T00:23:19,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741897_1073 (size=350562) 2024-12-07T00:23:19,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741897_1073 (size=350562) 2024-12-07T00:23:19,461 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0001_000001 
(auth:SIMPLE) from 127.0.0.1:41036 2024-12-07T00:23:20,056 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:23:20,057 INFO [RS-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38362, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:23:20,668 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:23:20,669 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T00:23:20,679 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: testExportWithTargetName 2024-12-07T00:23:20,680 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:23:20,680 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:23:20,681 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-12-07T00:23:20,681 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-12-07T00:23:20,681 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-12-07T00:23:20,681 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674/.hbase-snapshot/testExportWithTargetName 2024-12-07T00:23:20,682 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-12-07T00:23:20,682 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-12-07T00:23:20,693 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithTargetName 2024-12-07T00:23:20,697 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-12-07T00:23:20,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=23, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithTargetName 
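(Context, not part of the log: the export that finishes above with "Export Completed: testExportWithTargetName" is the ExportSnapshot tool copying snaptb0-testExportWithTargetName to the export-test directory and renaming it via a target name. A hedged sketch of an equivalent programmatic invocation using the tool's standard -snapshot/-copy-to/-target options; the test itself drives the tool through its own helpers.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Source snapshot, destination root, and the target name that renames the
        // exported snapshot directory to "testExportWithTargetName".
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithTargetName",
            "-copy-to", "hdfs://localhost:46157/user/jenkins/test-data/"
                + "2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733530977674",
            "-target", "testExportWithTargetName"
        });
        System.exit(rc);
      }
    }

(The tool copies the snapshot manifest first, runs a MapReduce job over the referenced hfiles, and then verifies the copied snapshot, matching the "Copy Snapshot Manifest", "Loading Snapshot ... hfile list", and "Verify the exported snapshot" messages in this log.)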
2024-12-07T00:23:20,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-07T00:23:20,714 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531000713"}]},"ts":"1733531000713"} 2024-12-07T00:23:20,716 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-12-07T00:23:20,719 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-12-07T00:23:20,721 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=24, ppid=23, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-12-07T00:23:20,727 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7eedb80957f4779681b5aac1294bebb9, UNASSIGN}, {pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f26d3c291cea0bc39ee497b79e25df36, UNASSIGN}] 2024-12-07T00:23:20,729 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=26, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f26d3c291cea0bc39ee497b79e25df36, UNASSIGN 2024-12-07T00:23:20,729 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=25, ppid=24, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7eedb80957f4779681b5aac1294bebb9, UNASSIGN 2024-12-07T00:23:20,731 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=7eedb80957f4779681b5aac1294bebb9, regionState=CLOSING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:20,731 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=f26d3c291cea0bc39ee497b79e25df36, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:20,734 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:23:20,735 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:23:20,735 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=27, ppid=26, state=RUNNABLE; CloseRegionProcedure f26d3c291cea0bc39ee497b79e25df36, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:23:20,740 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=28, ppid=25, state=RUNNABLE; CloseRegionProcedure 7eedb80957f4779681b5aac1294bebb9, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:23:20,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-07T00:23:20,892 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:20,893 
DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:20,895 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(124): Close f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:23:20,895 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:23:20,896 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1681): Closing f26d3c291cea0bc39ee497b79e25df36, disabling compactions & flushes 2024-12-07T00:23:20,896 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:23:20,896 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:23:20,896 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(124): Close 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:23:20,896 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. after waiting 0 ms 2024-12-07T00:23:20,896 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:23:20,896 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:23:20,896 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1681): Closing 7eedb80957f4779681b5aac1294bebb9, disabling compactions & flushes 2024-12-07T00:23:20,896 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1703): Closing region testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:23:20,896 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 2024-12-07T00:23:20,897 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. after waiting 0 ms 2024-12-07T00:23:20,897 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 
2024-12-07T00:23:20,910 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:23:20,913 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:23:20,914 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:23:20,915 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36. 2024-12-07T00:23:20,915 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] regionserver.HRegion(1635): Region close journal for f26d3c291cea0bc39ee497b79e25df36: 2024-12-07T00:23:20,915 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:23:20,915 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1922): Closed testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9. 
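The trace above shows the two regions of testtb-testExportWithTargetName being unassigned and closed on their regionservers (pids 25–28) as part of the DisableTableProcedure (pid=23). For orientation only, here is a minimal sketch of the kind of client-side call that drives such a DISABLE trace, assuming the standard HBase client libraries on the classpath; the connection setup and printout are illustrative and are not part of the logged test.

```java
// Hedged sketch: client-side disable of the table seen in the trace above.
// Only the table name is taken from the log; everything else is illustrative.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
      if (admin.isTableEnabled(tn)) {
        // disableTable blocks until the master-side disable procedure completes,
        // i.e. until every region of the table has been unassigned and closed.
        admin.disableTable(tn);
      }
      System.out.println("table disabled: " + admin.isTableDisabled(tn));
    }
  }
}
```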
2024-12-07T00:23:20,915 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] regionserver.HRegion(1635): Region close journal for 7eedb80957f4779681b5aac1294bebb9: 2024-12-07T00:23:20,919 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=27}] handler.UnassignRegionHandler(170): Closed f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:23:20,920 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=26 updating hbase:meta row=f26d3c291cea0bc39ee497b79e25df36, regionState=CLOSED 2024-12-07T00:23:20,921 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=28}] handler.UnassignRegionHandler(170): Closed 7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:23:20,921 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=25 updating hbase:meta row=7eedb80957f4779681b5aac1294bebb9, regionState=CLOSED 2024-12-07T00:23:20,928 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=27, resume processing ppid=26 2024-12-07T00:23:20,929 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=27, ppid=26, state=SUCCESS; CloseRegionProcedure f26d3c291cea0bc39ee497b79e25df36, server=6e6711bf2d40,36059,1733530964705 in 187 msec 2024-12-07T00:23:20,929 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=28, resume processing ppid=25 2024-12-07T00:23:20,931 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=26, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=f26d3c291cea0bc39ee497b79e25df36, UNASSIGN in 201 msec 2024-12-07T00:23:20,931 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=28, ppid=25, state=SUCCESS; CloseRegionProcedure 7eedb80957f4779681b5aac1294bebb9, server=6e6711bf2d40,43281,1733530964846 in 189 msec 2024-12-07T00:23:20,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=25, resume processing ppid=24 2024-12-07T00:23:20,933 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=25, ppid=24, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=7eedb80957f4779681b5aac1294bebb9, UNASSIGN in 202 msec 2024-12-07T00:23:20,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=24, resume processing ppid=23 2024-12-07T00:23:20,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=24, ppid=23, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 213 msec 2024-12-07T00:23:20,939 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531000939"}]},"ts":"1733531000939"} 2024-12-07T00:23:20,942 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-12-07T00:23:20,944 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 2024-12-07T00:23:20,947 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=23, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithTargetName in 246 msec 2024-12-07T00:23:21,014 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=23 2024-12-07T00:23:21,015 INFO 
[Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName, procId: 23 completed 2024-12-07T00:23:21,019 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-12-07T00:23:21,027 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T00:23:21,034 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=29, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T00:23:21,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithTargetName 2024-12-07T00:23:21,036 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=29, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T00:23:21,040 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-12-07T00:23:21,044 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T00:23:21,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T00:23:21,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T00:23:21,045 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T00:23:21,047 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-07T00:23:21,047 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-07T00:23:21,047 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-07T00:23:21,047 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-12-07T00:23:21,049 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9 
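At this point the DISABLE (procId 23) has completed and the client has requested deletion of the table; the master stores DeleteTableProcedure pid=29, removes the ACL entry, and the HFileArchiver begins archiving the region directories. The snapshot deletions appear a little later in the trace. Below is a hedged sketch of the equivalent client-side teardown, assuming an already-configured HBase Connection; the table and snapshot names come from the log, the surrounding code is illustrative only.

```java
// Hedged sketch: client-side delete of the table and its snapshots, matching
// the DELETE and "delete name: ..." requests recorded in the trace.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DeleteTableAndSnapshotsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
      // deleteTable requires the table to be disabled first (see pid=23 above).
      admin.deleteTable(tn);
      // Snapshot names as they appear in the SnapshotManager lines of the trace.
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}
```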
2024-12-07T00:23:21,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T00:23:21,050 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:21,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T00:23:21,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:21,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T00:23:21,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:21,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-12-07T00:23:21,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:21,055 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/recovered.edits] 2024-12-07T00:23:21,056 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:23:21,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-07T00:23:21,060 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/recovered.edits] 2024-12-07T00:23:21,067 DEBUG [HFileArchiver-1 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/cf/da39cec0da0c4afea945059f9cd49160 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/cf/da39cec0da0c4afea945059f9cd49160 2024-12-07T00:23:21,072 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9/recovered.edits/9.seqid 2024-12-07T00:23:21,073 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/7eedb80957f4779681b5aac1294bebb9 2024-12-07T00:23:21,080 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/cf/e378417012fa48989ee3db7d7d3cb77d to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/cf/e378417012fa48989ee3db7d7d3cb77d 2024-12-07T00:23:21,083 DEBUG [BootstrapNodeManager {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-12-07T00:23:21,085 INFO [RS-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38364, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-12-07T00:23:21,090 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36/recovered.edits/9.seqid 2024-12-07T00:23:21,091 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithTargetName/f26d3c291cea0bc39ee497b79e25df36 2024-12-07T00:23:21,091 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-12-07T00:23:21,095 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=29, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T00:23:21,103 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-12-07T00:23:21,109 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from 
hbase:meta 2024-12-07T00:23:21,118 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-12-07T00:23:21,130 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=29, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T00:23:21,130 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 2024-12-07T00:23:21,130 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531001130"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:21,130 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531001130"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:21,135 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:23:21,135 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 7eedb80957f4779681b5aac1294bebb9, NAME => 'testtb-testExportWithTargetName,,1733530975391.7eedb80957f4779681b5aac1294bebb9.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => f26d3c291cea0bc39ee497b79e25df36, NAME => 'testtb-testExportWithTargetName,1,1733530975391.f26d3c291cea0bc39ee497b79e25df36.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:23:21,135 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 
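By this point the HFileArchiver lines show the store files and recovered.edits markers moved from data/default/testtb-testExportWithTargetName/&lt;region&gt; to the parallel archive/data/default/... layout, and the region rows have been deleted from hbase:meta. A minimal sketch for inspecting that archive layout with the Hadoop FileSystem API follows; the NameNode URI and base path are copied from the log, while the listing code itself is purely illustrative and not part of the test.

```java
// Hedged sketch: recursively list what HFileArchiver left under the archive directory.
// hdfs URI and path are taken verbatim from the trace; the rest is illustrative.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;

public class ListArchivedRegionFiles {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46157"), conf);
    Path archived = new Path("/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/"
        + "archive/data/default/testtb-testExportWithTargetName");
    // Walk the archived region directories (cf/ store files, recovered.edits seqid files).
    RemoteIterator<LocatedFileStatus> it = fs.listFiles(archived, true);
    while (it.hasNext()) {
      System.out.println(it.next().getPath());
    }
  }
}
```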
2024-12-07T00:23:21,135 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531001135"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:21,158 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-07T00:23:21,161 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithTargetName state from META 2024-12-07T00:23:21,165 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=29, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-12-07T00:23:21,168 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=29, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithTargetName in 144 msec 2024-12-07T00:23:21,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=29 2024-12-07T00:23:21,362 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName, procId: 29 completed 2024-12-07T00:23:21,375 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" 2024-12-07T00:23:21,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-12-07T00:23:21,380 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" 2024-12-07T00:23:21,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithTargetName 2024-12-07T00:23:21,424 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithTargetName Thread=771 (was 719) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44323 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:52952 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:52420 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:44080 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1157000164_1 at /127.0.0.1:44058 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (41150622) connection to localhost/127.0.0.1:44323 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1294 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: htable-pool-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (41150622) connection to localhost/127.0.0.1:38977 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool.commonPool-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) 
java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: htable-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #0 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38977 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 18636) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=794 (was 765) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=643 (was 523) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=4317 (was 5941) 2024-12-07T00:23:21,425 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=771 is superior to 500 2024-12-07T00:23:21,447 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=771, OpenFileDescriptor=794, MaxFileDescriptor=1048576, SystemLoadAverage=643, ProcessCount=17, AvailableMemoryMB=4313 2024-12-07T00:23:21,447 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=771 is superior to 500 2024-12-07T00:23:21,450 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:23:21,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T00:23:21,453 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:23:21,453 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:21,453 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 30 2024-12-07T00:23:21,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T00:23:21,454 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:23:21,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741898_1074 (size=404) 2024-12-07T00:23:21,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741898_1074 (size=404) 2024-12-07T00:23:21,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741898_1074 (size=404) 2024-12-07T00:23:21,469 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 61c177d8522b4160c7a2d82d13452227, NAME => 'testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL 
=> 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:21,469 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => d76bbe949105469222eaaf6c93c105b5, NAME => 'testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:21,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741899_1075 (size=65) 2024-12-07T00:23:21,498 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741899_1075 (size=65) 2024-12-07T00:23:21,499 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741899_1075 (size=65) 2024-12-07T00:23:21,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741900_1076 (size=65) 2024-12-07T00:23:21,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741900_1076 (size=65) 2024-12-07T00:23:21,500 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:21,500 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing 61c177d8522b4160c7a2d82d13452227, disabling compactions & flushes 2024-12-07T00:23:21,500 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:21,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741900_1076 (size=65) 2024-12-07T00:23:21,500 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:21,500 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 
after waiting 0 ms 2024-12-07T00:23:21,500 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:21,500 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:21,501 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for 61c177d8522b4160c7a2d82d13452227: 2024-12-07T00:23:21,501 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:21,501 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing d76bbe949105469222eaaf6c93c105b5, disabling compactions & flushes 2024-12-07T00:23:21,501 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:21,501 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:21,501 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. after waiting 0 ms 2024-12-07T00:23:21,501 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:21,501 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:21,501 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for d76bbe949105469222eaaf6c93c105b5: 2024-12-07T00:23:21,503 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:23:21,503 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733531001503"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531001503"}]},"ts":"1733531001503"} 2024-12-07T00:23:21,503 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733531001503"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531001503"}]},"ts":"1733531001503"} 2024-12-07T00:23:21,507 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
2024-12-07T00:23:21,508 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:23:21,508 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531001508"}]},"ts":"1733531001508"} 2024-12-07T00:23:21,510 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-07T00:23:21,515 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:23:21,517 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:23:21,517 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:23:21,517 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:23:21,517 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:23:21,517 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:23:21,517 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:23:21,517 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:23:21,518 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=61c177d8522b4160c7a2d82d13452227, ASSIGN}, {pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d76bbe949105469222eaaf6c93c105b5, ASSIGN}] 2024-12-07T00:23:21,519 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=61c177d8522b4160c7a2d82d13452227, ASSIGN 2024-12-07T00:23:21,520 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d76bbe949105469222eaaf6c93c105b5, ASSIGN 2024-12-07T00:23:21,521 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=31, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=61c177d8522b4160c7a2d82d13452227, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:23:21,522 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=32, ppid=30, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d76bbe949105469222eaaf6c93c105b5, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:23:21,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=30 2024-12-07T00:23:21,672 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:23:21,672 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=d76bbe949105469222eaaf6c93c105b5, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:23:21,672 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=61c177d8522b4160c7a2d82d13452227, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:21,675 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=33, ppid=31, state=RUNNABLE; OpenRegionProcedure 61c177d8522b4160c7a2d82d13452227, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:23:21,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=34, ppid=32, state=RUNNABLE; OpenRegionProcedure d76bbe949105469222eaaf6c93c105b5, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:23:21,757 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T00:23:21,829 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:23:21,829 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:21,835 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:21,836 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7285): Opening region: {ENCODED => 61c177d8522b4160c7a2d82d13452227, NAME => 'testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:23:21,836 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. service=AccessControlService 2024-12-07T00:23:21,837 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:23:21,837 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:21,837 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:21,837 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7327): checking encryption for 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:21,837 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(7330): checking classloading for 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:21,845 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(135): Open testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:21,845 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7285): Opening region: {ENCODED => d76bbe949105469222eaaf6c93c105b5, NAME => 'testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:23:21,845 INFO [StoreOpener-61c177d8522b4160c7a2d82d13452227-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:21,846 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. service=AccessControlService 2024-12-07T00:23:21,846 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:23:21,846 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:21,846 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(894): Instantiated testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:21,846 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7327): checking encryption for d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:21,846 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(7330): checking classloading for d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:21,854 INFO [StoreOpener-61c177d8522b4160c7a2d82d13452227-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 61c177d8522b4160c7a2d82d13452227 columnFamilyName cf 2024-12-07T00:23:21,854 INFO [StoreOpener-d76bbe949105469222eaaf6c93c105b5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:21,854 DEBUG [StoreOpener-61c177d8522b4160c7a2d82d13452227-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:21,855 INFO [StoreOpener-61c177d8522b4160c7a2d82d13452227-1 {}] regionserver.HStore(327): Store=61c177d8522b4160c7a2d82d13452227/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:23:21,856 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:21,857 INFO [StoreOpener-d76bbe949105469222eaaf6c93c105b5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction 
policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d76bbe949105469222eaaf6c93c105b5 columnFamilyName cf 2024-12-07T00:23:21,857 DEBUG [StoreOpener-d76bbe949105469222eaaf6c93c105b5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:21,858 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:21,858 INFO [StoreOpener-d76bbe949105469222eaaf6c93c105b5-1 {}] regionserver.HStore(327): Store=d76bbe949105469222eaaf6c93c105b5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:23:21,864 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:21,865 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:21,866 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1085): writing seq id for 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:21,868 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1085): writing seq id for d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:21,869 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:23:21,870 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1102): Opened 61c177d8522b4160c7a2d82d13452227; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70675679, jitterRate=0.05314968526363373}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:23:21,871 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegion(1001): Region open journal for 61c177d8522b4160c7a2d82d13452227: 2024-12-07T00:23:21,873 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227., pid=33, masterSystemTime=1733531001829 
2024-12-07T00:23:21,873 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:23:21,874 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1102): Opened d76bbe949105469222eaaf6c93c105b5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68153481, jitterRate=0.015566006302833557}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:23:21,874 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegion(1001): Region open journal for d76bbe949105469222eaaf6c93c105b5: 2024-12-07T00:23:21,875 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5., pid=34, masterSystemTime=1733531001829 2024-12-07T00:23:21,875 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:21,875 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=33}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:21,876 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=31 updating hbase:meta row=61c177d8522b4160c7a2d82d13452227, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:21,877 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:21,877 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=34}] handler.AssignRegionHandler(164): Opened testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 
2024-12-07T00:23:21,878 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=32 updating hbase:meta row=d76bbe949105469222eaaf6c93c105b5, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:23:21,882 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=33, resume processing ppid=31 2024-12-07T00:23:21,882 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=34, resume processing ppid=32 2024-12-07T00:23:21,883 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=34, ppid=32, state=SUCCESS; OpenRegionProcedure d76bbe949105469222eaaf6c93c105b5, server=6e6711bf2d40,33095,1733530964767 in 204 msec 2024-12-07T00:23:21,883 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=33, ppid=31, state=SUCCESS; OpenRegionProcedure 61c177d8522b4160c7a2d82d13452227, server=6e6711bf2d40,36059,1733530964705 in 204 msec 2024-12-07T00:23:21,883 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=31, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=61c177d8522b4160c7a2d82d13452227, ASSIGN in 365 msec 2024-12-07T00:23:21,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=32, resume processing ppid=30 2024-12-07T00:23:21,885 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=32, ppid=30, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d76bbe949105469222eaaf6c93c105b5, ASSIGN in 366 msec 2024-12-07T00:23:21,886 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:23:21,886 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531001886"}]},"ts":"1733531001886"} 2024-12-07T00:23:21,887 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-07T00:23:21,891 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=30, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:23:21,891 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-12-07T00:23:21,894 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T00:23:21,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:21,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:21,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:21,895 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:21,898 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:21,898 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:21,898 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:21,898 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:21,900 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=30, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithResetTtl in 448 msec 2024-12-07T00:23:22,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=30 2024-12-07T00:23:22,058 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl, procId: 30 completed 2024-12-07T00:23:22,058 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-07T00:23:22,059 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:22,063 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-07T00:23:22,064 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:22,064 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithResetTtl assigned. 2024-12-07T00:23:22,068 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-07T00:23:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531002068 (current time:1733531002068). 
2024-12-07T00:23:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:23:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-07T00:23:22,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:23:22,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x604b42ea to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@7e854bfa 2024-12-07T00:23:22,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@106db6f6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:22,076 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:22,077 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41416, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x604b42ea to 127.0.0.1:60354 2024-12-07T00:23:22,079 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:22,080 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1aff505d to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4e710263 2024-12-07T00:23:22,084 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33d6c75d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:22,085 DEBUG [hconnection-0x5b72a72b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:22,087 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41424, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:22,089 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:22,090 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49024, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:22,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x1aff505d to 127.0.0.1:60354 2024-12-07T00:23:22,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:22,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T00:23:22,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:23:22,093 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=35, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-07T00:23:22,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-07T00:23:22,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T00:23:22,095 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:23:22,096 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:23:22,100 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:23:22,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741901_1077 (size=161) 2024-12-07T00:23:22,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741901_1077 (size=161) 2024-12-07T00:23:22,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741901_1077 (size=161) 2024-12-07T00:23:22,111 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:23:22,111 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 61c177d8522b4160c7a2d82d13452227}, {pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure d76bbe949105469222eaaf6c93c105b5}] 
2024-12-07T00:23:22,112 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:22,112 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:22,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T00:23:22,264 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:22,264 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:23:22,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=37 2024-12-07T00:23:22,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=36 2024-12-07T00:23:22,265 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.HRegion(2538): Flush status journal for d76bbe949105469222eaaf6c93c105b5: 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.HRegion(2538): Flush status journal for 61c177d8522b4160c7a2d82d13452227: 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. for emptySnaptb0-testExportWithResetTtl completed. 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:22,266 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:23:22,289 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741902_1078 (size=68) 2024-12-07T00:23:22,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741902_1078 (size=68) 2024-12-07T00:23:22,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741902_1078 (size=68) 2024-12-07T00:23:22,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:22,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741903_1079 (size=68) 2024-12-07T00:23:22,291 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=37}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=37 2024-12-07T00:23:22,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741903_1079 (size=68) 2024-12-07T00:23:22,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=37 2024-12-07T00:23:22,292 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:22,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741903_1079 (size=68) 2024-12-07T00:23:22,292 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=37, ppid=35, state=RUNNABLE; SnapshotRegionProcedure d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:22,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 
2024-12-07T00:23:22,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=36}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=36 2024-12-07T00:23:22,294 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=36 2024-12-07T00:23:22,294 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:22,295 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=36, ppid=35, state=RUNNABLE; SnapshotRegionProcedure 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:22,296 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=37, ppid=35, state=SUCCESS; SnapshotRegionProcedure d76bbe949105469222eaaf6c93c105b5 in 183 msec 2024-12-07T00:23:22,299 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=36, resume processing ppid=35 2024-12-07T00:23:22,299 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:23:22,299 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=36, ppid=35, state=SUCCESS; SnapshotRegionProcedure 61c177d8522b4160c7a2d82d13452227 in 185 msec 2024-12-07T00:23:22,300 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:23:22,302 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:23:22,302 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-12-07T00:23:22,303 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-12-07T00:23:22,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741904_1080 (size=543) 2024-12-07T00:23:22,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741904_1080 (size=543) 2024-12-07T00:23:22,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741904_1080 (size=543) 2024-12-07T00:23:22,343 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ 
ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:23:22,351 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:23:22,351 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-12-07T00:23:22,354 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=35, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:23:22,354 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 35 2024-12-07T00:23:22,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=35, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=35, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 262 msec 2024-12-07T00:23:22,398 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=35 2024-12-07T00:23:22,398 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 35 completed 2024-12-07T00:23:22,410 DEBUG [htable-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:22,411 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:23:22,413 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49034, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:22,417 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33095 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:23:22,425 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithResetTtl 2024-12-07T00:23:22,425 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 
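The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." entries above are what a region server prints when a client writes with the write-ahead log skipped, which the test presumably does to load rows quickly before snapshotting. The loader code itself is not in the log; below is a hedged sketch of one such put against the cf:q column seen in the flushed cells, with the row key and value being placeholders rather than values taken from the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SkipWalPutSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportWithResetTtl"))) {
      Put put = new Put(Bytes.toBytes("row-0001"));             // hypothetical row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the region server's "WAL disabled" warning: the write
      // lands only in the memstore, trading durability for speed in a throwaway test table.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}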
2024-12-07T00:23:22,426 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:22,448 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-07T00:23:22,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531002449 (current time:1733531002449). 2024-12-07T00:23:22,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:23:22,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-07T00:23:22,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:23:22,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x78a2ca6f to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@36a761af 2024-12-07T00:23:22,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@76c5aca2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:22,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:22,459 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41436, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:22,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x78a2ca6f to 127.0.0.1:60354 2024-12-07T00:23:22,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:22,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x6b5d0dda to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4fb4f7e3 2024-12-07T00:23:22,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cb31393, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:22,472 DEBUG [hconnection-0x14d0f809-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:22,474 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41452, 
version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:22,476 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:22,477 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49038, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:22,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x6b5d0dda to 127.0.0.1:60354 2024-12-07T00:23:22,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:22,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T00:23:22,480 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:23:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=38, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-12-07T00:23:22,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-07T00:23:22,484 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-07T00:23:22,486 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:23:22,487 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:23:22,490 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:23:22,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741905_1081 (size=156) 2024-12-07T00:23:22,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741905_1081 (size=156) 2024-12-07T00:23:22,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741905_1081 (size=156) 2024-12-07T00:23:22,506 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:23:22,506 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 61c177d8522b4160c7a2d82d13452227}, {pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure d76bbe949105469222eaaf6c93c105b5}] 2024-12-07T00:23:22,508 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:22,509 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:22,585 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-07T00:23:22,660 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:22,660 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:23:22,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=39 2024-12-07T00:23:22,661 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=40 2024-12-07T00:23:22,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:22,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 
2024-12-07T00:23:22,661 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2837): Flushing 61c177d8522b4160c7a2d82d13452227 1/1 column families, dataSize=199 B heapSize=688 B 2024-12-07T00:23:22,662 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2837): Flushing d76bbe949105469222eaaf6c93c105b5 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-12-07T00:23:22,679 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/.tmp/cf/e3488d5b25ab459c96a5777b59dfbf0f is 71, key is 07e0627ae33dbdc107a515debc785c44/cf:q/1733531002411/Put/seqid=0 2024-12-07T00:23:22,681 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/.tmp/cf/ec3dcf03540b425db7a6e81d4085a652 is 71, key is 12918571b23b15c99de027c5d9cf1547/cf:q/1733531002417/Put/seqid=0 2024-12-07T00:23:22,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741906_1082 (size=5286) 2024-12-07T00:23:22,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741906_1082 (size=5286) 2024-12-07T00:23:22,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741906_1082 (size=5286) 2024-12-07T00:23:22,695 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=199 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/.tmp/cf/e3488d5b25ab459c96a5777b59dfbf0f 2024-12-07T00:23:22,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741907_1083 (size=8324) 2024-12-07T00:23:22,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741907_1083 (size=8324) 2024-12-07T00:23:22,700 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.06 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/.tmp/cf/ec3dcf03540b425db7a6e81d4085a652 2024-12-07T00:23:22,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741907_1083 (size=8324) 2024-12-07T00:23:22,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/.tmp/cf/e3488d5b25ab459c96a5777b59dfbf0f as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/cf/e3488d5b25ab459c96a5777b59dfbf0f 2024-12-07T00:23:22,711 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/cf/e3488d5b25ab459c96a5777b59dfbf0f, entries=3, sequenceid=6, filesize=5.2 K 2024-12-07T00:23:22,712 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(3040): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 61c177d8522b4160c7a2d82d13452227 in 51ms, sequenceid=6, compaction requested=false 2024-12-07T00:23:22,712 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-12-07T00:23:22,713 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.HRegion(2538): Flush status journal for 61c177d8522b4160c7a2d82d13452227: 2024-12-07T00:23:22,713 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. for snaptb0-testExportWithResetTtl completed. 2024-12-07T00:23:22,713 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-07T00:23:22,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:22,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/cf/e3488d5b25ab459c96a5777b59dfbf0f] hfiles 2024-12-07T00:23:22,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/cf/e3488d5b25ab459c96a5777b59dfbf0f for snapshot=snaptb0-testExportWithResetTtl 2024-12-07T00:23:22,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741908_1084 (size=107) 2024-12-07T00:23:22,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741908_1084 (size=107) 2024-12-07T00:23:22,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741908_1084 (size=107) 2024-12-07T00:23:22,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 
2024-12-07T00:23:22,726 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=39}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=39 2024-12-07T00:23:22,727 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/.tmp/cf/ec3dcf03540b425db7a6e81d4085a652 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/cf/ec3dcf03540b425db7a6e81d4085a652 2024-12-07T00:23:22,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=39 2024-12-07T00:23:22,728 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:22,728 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=39, ppid=38, state=RUNNABLE; SnapshotRegionProcedure 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:22,732 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=39, ppid=38, state=SUCCESS; SnapshotRegionProcedure 61c177d8522b4160c7a2d82d13452227 in 224 msec 2024-12-07T00:23:22,735 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/cf/ec3dcf03540b425db7a6e81d4085a652, entries=47, sequenceid=6, filesize=8.1 K 2024-12-07T00:23:22,736 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(3040): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for d76bbe949105469222eaaf6c93c105b5 in 74ms, sequenceid=6, compaction requested=false 2024-12-07T00:23:22,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.HRegion(2538): Flush status journal for d76bbe949105469222eaaf6c93c105b5: 2024-12-07T00:23:22,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. for snaptb0-testExportWithResetTtl completed. 2024-12-07T00:23:22,736 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.' 
region-info for snapshot=snaptb0-testExportWithResetTtl 2024-12-07T00:23:22,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:22,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/cf/ec3dcf03540b425db7a6e81d4085a652] hfiles 2024-12-07T00:23:22,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/cf/ec3dcf03540b425db7a6e81d4085a652 for snapshot=snaptb0-testExportWithResetTtl 2024-12-07T00:23:22,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741909_1085 (size=107) 2024-12-07T00:23:22,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741909_1085 (size=107) 2024-12-07T00:23:22,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741909_1085 (size=107) 2024-12-07T00:23:22,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 
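Region subprocedure pid=40 closes here, and the entries that follow show the parent SnapshotProcedure pid=38 walking through SNAPSHOT_CONSOLIDATE_SNAPSHOT, SNAPSHOT_VERIFIER_SNAPSHOT and SNAPSHOT_COMPLETE_SNAPSHOT to SUCCESS. Once both procedures have finished, the two snapshots are visible to any client; a minimal way to confirm that (not part of the logged test code) would be:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;

public class ListSnapshotsSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Expect emptySnaptb0-testExportWithResetTtl and snaptb0-testExportWithResetTtl
      // among the completed snapshots once pid=35 and pid=38 have finished.
      for (SnapshotDescription sd : admin.listSnapshots()) {
        System.out.println(sd.getName());
      }
    }
  }
}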
2024-12-07T00:23:22,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=40}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=40 2024-12-07T00:23:22,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=40 2024-12-07T00:23:22,746 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:22,747 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=40, ppid=38, state=RUNNABLE; SnapshotRegionProcedure d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:22,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=40, resume processing ppid=38 2024-12-07T00:23:22,752 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:23:22,752 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=40, ppid=38, state=SUCCESS; SnapshotRegionProcedure d76bbe949105469222eaaf6c93c105b5 in 242 msec 2024-12-07T00:23:22,753 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:23:22,754 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:23:22,754 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-12-07T00:23:22,755 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-12-07T00:23:22,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741910_1086 (size=621) 2024-12-07T00:23:22,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741910_1086 (size=621) 2024-12-07T00:23:22,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741910_1086 (size=621) 2024-12-07T00:23:22,774 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:23:22,782 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:23:22,782 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-12-07T00:23:22,784 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=38, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:23:22,784 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 38 2024-12-07T00:23:22,785 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=38, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=38, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 304 msec 2024-12-07T00:23:22,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=38 2024-12-07T00:23:22,786 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl, procId: 38 completed 2024-12-07T00:23:22,788 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:23:22,789 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportWithResetTtl 2024-12-07T00:23:22,790 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:23:22,791 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:22,791 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 41 2024-12-07T00:23:22,792 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportWithResetTtl execute 
state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:23:22,792 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-07T00:23:22,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741911_1087 (size=397) 2024-12-07T00:23:22,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741911_1087 (size=397) 2024-12-07T00:23:22,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741911_1087 (size=397) 2024-12-07T00:23:22,803 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => fc29d31c074b28990b2a7e371a24dcc7, NAME => 'testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:22,804 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 97494782415195dbc1a7bbca0b1fac6b, NAME => 'testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:22,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741913_1089 (size=58) 2024-12-07T00:23:22,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741913_1089 (size=58) 2024-12-07T00:23:22,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741913_1089 (size=58) 2024-12-07T00:23:22,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741912_1088 (size=58) 2024-12-07T00:23:22,820 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:22,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 
is added to blk_1073741912_1088 (size=58) 2024-12-07T00:23:22,820 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1681): Closing fc29d31c074b28990b2a7e371a24dcc7, disabling compactions & flushes 2024-12-07T00:23:22,820 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:22,820 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:22,820 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. after waiting 0 ms 2024-12-07T00:23:22,820 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:22,820 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:22,820 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1635): Region close journal for fc29d31c074b28990b2a7e371a24dcc7: 2024-12-07T00:23:22,821 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741912_1088 (size=58) 2024-12-07T00:23:22,821 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:22,821 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1681): Closing 97494782415195dbc1a7bbca0b1fac6b, disabling compactions & flushes 2024-12-07T00:23:22,821 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:22,821 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:22,821 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. after waiting 0 ms 2024-12-07T00:23:22,821 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:22,821 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 
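Above, CreateTableProcedure pid=41 lays out the filesystem for 'testExportWithResetTtl' and initializes its two regions, split at row key '1' (STARTKEY '' to '1' and '1' to ''). The log records the descriptor the master received, but not the client code that sent it; a sketch of an equivalent HBase 2.x Admin call, keeping only the attribute that differs from the defaults in the logged descriptor (a single version on family cf), looks like this:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportWithResetTtl"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder
              .newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)        // VERSIONS => '1' in the logged descriptor
              .build())
          .build();
      // One explicit split key yields the two regions seen in the log: ('' -> '1') and ('1' -> '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}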
2024-12-07T00:23:22,821 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1635): Region close journal for 97494782415195dbc1a7bbca0b1fac6b: 2024-12-07T00:23:22,823 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:23:22,823 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733531002823"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531002823"}]},"ts":"1733531002823"} 2024-12-07T00:23:22,823 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1733531002823"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531002823"}]},"ts":"1733531002823"} 2024-12-07T00:23:22,826 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:23:22,827 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:23:22,827 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531002827"}]},"ts":"1733531002827"} 2024-12-07T00:23:22,829 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-12-07T00:23:22,833 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:23:22,834 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:23:22,834 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:23:22,834 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:23:22,834 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:23:22,834 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:23:22,834 INFO [PEWorker-2 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:23:22,834 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:23:22,834 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=fc29d31c074b28990b2a7e371a24dcc7, ASSIGN}, {pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=97494782415195dbc1a7bbca0b1fac6b, ASSIGN}] 2024-12-07T00:23:22,836 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=97494782415195dbc1a7bbca0b1fac6b, ASSIGN 2024-12-07T00:23:22,836 INFO 
[PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportWithResetTtl, region=fc29d31c074b28990b2a7e371a24dcc7, ASSIGN 2024-12-07T00:23:22,837 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=43, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=97494782415195dbc1a7bbca0b1fac6b, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:23:22,838 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=42, ppid=41, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=fc29d31c074b28990b2a7e371a24dcc7, ASSIGN; state=OFFLINE, location=6e6711bf2d40,43281,1733530964846; forceNewPlan=false, retain=false 2024-12-07T00:23:22,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-07T00:23:22,988 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:23:22,988 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=97494782415195dbc1a7bbca0b1fac6b, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:22,988 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=fc29d31c074b28990b2a7e371a24dcc7, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:22,990 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=44, ppid=42, state=RUNNABLE; OpenRegionProcedure fc29d31c074b28990b2a7e371a24dcc7, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:23:22,991 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=45, ppid=43, state=RUNNABLE; OpenRegionProcedure 97494782415195dbc1a7bbca0b1fac6b, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:23:23,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-07T00:23:23,143 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:23,145 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:23,147 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:23,147 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7285): Opening region: {ENCODED => fc29d31c074b28990b2a7e371a24dcc7, NAME => 'testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:23:23,147 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 
service=AccessControlService 2024-12-07T00:23:23,147 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:23:23,148 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,148 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:23,148 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7327): checking encryption for fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,148 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(7330): checking classloading for fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,148 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(135): Open testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:23,148 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7285): Opening region: {ENCODED => 97494782415195dbc1a7bbca0b1fac6b, NAME => 'testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:23:23,149 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. service=AccessControlService 2024-12-07T00:23:23,149 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
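Each region open above registers the AccessControlService coprocessor, which is also what served the "Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA]" lookups earlier in the log. The grant that put that RWXCA entry in place happened before this excerpt and is not shown; a hedged sketch of such a grant through the AccessControlClient helper would be:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;

public class GrantAclSketch {
  public static void main(String[] args) throws Throwable {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create())) {
      // RWXCA in the acl entry stands for READ, WRITE, EXEC, CREATE, ADMIN on the whole table
      // (family and qualifier left null so the grant covers every column).
      AccessControlClient.grant(conn, TableName.valueOf("testtb-testExportWithResetTtl"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
  }
}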
2024-12-07T00:23:23,149 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,149 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(894): Instantiated testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:23,149 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7327): checking encryption for 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,149 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(7330): checking classloading for 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,149 INFO [StoreOpener-fc29d31c074b28990b2a7e371a24dcc7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,151 INFO [StoreOpener-fc29d31c074b28990b2a7e371a24dcc7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fc29d31c074b28990b2a7e371a24dcc7 columnFamilyName cf 2024-12-07T00:23:23,151 DEBUG [StoreOpener-fc29d31c074b28990b2a7e371a24dcc7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:23,152 INFO [StoreOpener-fc29d31c074b28990b2a7e371a24dcc7-1 {}] regionserver.HStore(327): Store=fc29d31c074b28990b2a7e371a24dcc7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:23:23,152 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,153 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,155 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1085): writing seq id for 
fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,157 INFO [StoreOpener-97494782415195dbc1a7bbca0b1fac6b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,158 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:23:23,158 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1102): Opened fc29d31c074b28990b2a7e371a24dcc7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66902963, jitterRate=-0.0030681639909744263}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:23:23,159 INFO [StoreOpener-97494782415195dbc1a7bbca0b1fac6b-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97494782415195dbc1a7bbca0b1fac6b columnFamilyName cf 2024-12-07T00:23:23,159 DEBUG [StoreOpener-97494782415195dbc1a7bbca0b1fac6b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:23,159 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegion(1001): Region open journal for fc29d31c074b28990b2a7e371a24dcc7: 2024-12-07T00:23:23,159 INFO [StoreOpener-97494782415195dbc1a7bbca0b1fac6b-1 {}] regionserver.HStore(327): Store=97494782415195dbc1a7bbca0b1fac6b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:23:23,160 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7., pid=44, masterSystemTime=1733531003142 2024-12-07T00:23:23,160 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,161 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,162 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:23,162 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=44}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:23,163 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=42 updating hbase:meta row=fc29d31c074b28990b2a7e371a24dcc7, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:23,163 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1085): writing seq id for 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,167 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:23:23,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=44, resume processing ppid=42 2024-12-07T00:23:23,167 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=44, ppid=42, state=SUCCESS; OpenRegionProcedure fc29d31c074b28990b2a7e371a24dcc7, server=6e6711bf2d40,43281,1733530964846 in 175 msec 2024-12-07T00:23:23,167 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1102): Opened 97494782415195dbc1a7bbca0b1fac6b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67467473, jitterRate=0.005343690514564514}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:23:23,168 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegion(1001): Region open journal for 97494782415195dbc1a7bbca0b1fac6b: 2024-12-07T00:23:23,169 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b., pid=45, masterSystemTime=1733531003145 2024-12-07T00:23:23,169 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=42, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=fc29d31c074b28990b2a7e371a24dcc7, ASSIGN in 333 msec 2024-12-07T00:23:23,170 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:23,170 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=45}] handler.AssignRegionHandler(164): Opened testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 
2024-12-07T00:23:23,171 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=43 updating hbase:meta row=97494782415195dbc1a7bbca0b1fac6b, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:23,175 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=45, resume processing ppid=43 2024-12-07T00:23:23,175 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=45, ppid=43, state=SUCCESS; OpenRegionProcedure 97494782415195dbc1a7bbca0b1fac6b, server=6e6711bf2d40,36059,1733530964705 in 181 msec 2024-12-07T00:23:23,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=43, resume processing ppid=41 2024-12-07T00:23:23,177 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=43, ppid=41, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=97494782415195dbc1a7bbca0b1fac6b, ASSIGN in 341 msec 2024-12-07T00:23:23,178 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:23:23,178 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531003178"}]},"ts":"1733531003178"} 2024-12-07T00:23:23,180 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-12-07T00:23:23,183 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=41, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:23:23,184 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-12-07T00:23:23,186 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T00:23:23,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:23,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:23,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:23,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:23,191 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:23,191 DEBUG [zk-permission-watcher-pool-0 
{}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:23,191 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:23,191 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:23,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:23,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:23,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:23,192 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:23,193 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=41, state=SUCCESS; CreateTableProcedure table=testExportWithResetTtl in 403 msec 2024-12-07T00:23:23,396 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=41 2024-12-07T00:23:23,396 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportWithResetTtl, procId: 41 completed 2024-12-07T00:23:23,397 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportWithResetTtl get assigned. Timeout = 60000ms 2024-12-07T00:23:23,397 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:23,401 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportWithResetTtl assigned to meta. Checking AM states. 2024-12-07T00:23:23,402 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:23,402 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportWithResetTtl assigned. 2024-12-07T00:23:23,414 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-07T00:23:23,419 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.HRegion(8254): writing data to region testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:23:23,423 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportWithResetTtl 2024-12-07T00:23:23,423 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:23,424 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:23,437 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-07T00:23:23,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531003437 (current time:1733531003437). 2024-12-07T00:23:23,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-12-07T00:23:23,437 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:23:23,439 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7fc90064 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@45721f94 2024-12-07T00:23:23,442 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46cf5e5c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:23,443 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:23,444 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39062, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:23,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7fc90064 to 127.0.0.1:60354 2024-12-07T00:23:23,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:23,446 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7cb7e451 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@296ccaec 2024-12-07T00:23:23,449 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@156d5c23, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:23,451 DEBUG [hconnection-0xe87d885-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:23,453 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39078, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:23,455 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:23,456 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:23,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7cb7e451 to 127.0.0.1:60354 2024-12-07T00:23:23,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:23,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-12-07T00:23:23,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:23:23,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=46, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-12-07T00:23:23,459 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-07T00:23:23,460 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:23:23,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-07T00:23:23,461 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:23:23,464 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:23:23,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741914_1090 (size=143) 2024-12-07T00:23:23,474 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741914_1090 (size=143) 2024-12-07T00:23:23,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741914_1090 (size=143) 2024-12-07T00:23:23,476 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:23:23,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure fc29d31c074b28990b2a7e371a24dcc7}, {pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 97494782415195dbc1a7bbca0b1fac6b}] 2024-12-07T00:23:23,477 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,477 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-07T00:23:23,628 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:23,628 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:23,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=48 2024-12-07T00:23:23,629 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=47 2024-12-07T00:23:23,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:23,629 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 
2024-12-07T00:23:23,629 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2837): Flushing 97494782415195dbc1a7bbca0b1fac6b 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-07T00:23:23,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2837): Flushing fc29d31c074b28990b2a7e371a24dcc7 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-07T00:23:23,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/.tmp/cf/99c1a1fde797405db71f507936615628 is 71, key is 2d1d396b97d00b817bbd9b8046d86bcb/cf:q/1733531003414/Put/seqid=0 2024-12-07T00:23:23,658 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/.tmp/cf/bbf3a37c5dc04febbb6e8b7f125b917e is 71, key is 0c5625105d26c2c4166c8183bd87adbc/cf:q/1733531003419/Put/seqid=0 2024-12-07T00:23:23,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741915_1091 (size=8394) 2024-12-07T00:23:23,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741915_1091 (size=8394) 2024-12-07T00:23:23,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741915_1091 (size=8394) 2024-12-07T00:23:23,715 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/.tmp/cf/99c1a1fde797405db71f507936615628 2024-12-07T00:23:23,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741916_1092 (size=5216) 2024-12-07T00:23:23,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741916_1092 (size=5216) 2024-12-07T00:23:23,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741916_1092 (size=5216) 2024-12-07T00:23:23,724 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/.tmp/cf/bbf3a37c5dc04febbb6e8b7f125b917e 2024-12-07T00:23:23,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/.tmp/cf/99c1a1fde797405db71f507936615628 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/cf/99c1a1fde797405db71f507936615628 2024-12-07T00:23:23,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/.tmp/cf/bbf3a37c5dc04febbb6e8b7f125b917e as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/cf/bbf3a37c5dc04febbb6e8b7f125b917e 2024-12-07T00:23:23,735 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/cf/99c1a1fde797405db71f507936615628, entries=48, sequenceid=5, filesize=8.2 K 2024-12-07T00:23:23,737 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 97494782415195dbc1a7bbca0b1fac6b in 108ms, sequenceid=5, compaction requested=false 2024-12-07T00:23:23,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-12-07T00:23:23,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.HRegion(2538): Flush status journal for 97494782415195dbc1a7bbca0b1fac6b: 2024-12-07T00:23:23,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. for snaptb-testExportWithResetTtl completed. 2024-12-07T00:23:23,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-07T00:23:23,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:23,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/cf/99c1a1fde797405db71f507936615628] hfiles 2024-12-07T00:23:23,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/cf/99c1a1fde797405db71f507936615628 for snapshot=snaptb-testExportWithResetTtl 2024-12-07T00:23:23,745 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/cf/bbf3a37c5dc04febbb6e8b7f125b917e, entries=2, sequenceid=5, filesize=5.1 K 2024-12-07T00:23:23,746 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for fc29d31c074b28990b2a7e371a24dcc7 in 116ms, sequenceid=5, compaction requested=false 2024-12-07T00:23:23,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.HRegion(2538): Flush status journal for fc29d31c074b28990b2a7e371a24dcc7: 2024-12-07T00:23:23,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. for snaptb-testExportWithResetTtl completed. 2024-12-07T00:23:23,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7.' 
region-info for snapshot=snaptb-testExportWithResetTtl 2024-12-07T00:23:23,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:23,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/cf/bbf3a37c5dc04febbb6e8b7f125b917e] hfiles 2024-12-07T00:23:23,747 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/cf/bbf3a37c5dc04febbb6e8b7f125b917e for snapshot=snaptb-testExportWithResetTtl 2024-12-07T00:23:23,763 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-07T00:23:23,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741917_1093 (size=100) 2024-12-07T00:23:23,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741917_1093 (size=100) 2024-12-07T00:23:23,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741917_1093 (size=100) 2024-12-07T00:23:23,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 
2024-12-07T00:23:23,784 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=48}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=48 2024-12-07T00:23:23,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=48 2024-12-07T00:23:23,785 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,785 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=48, ppid=46, state=RUNNABLE; SnapshotRegionProcedure 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:23,787 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=48, ppid=46, state=SUCCESS; SnapshotRegionProcedure 97494782415195dbc1a7bbca0b1fac6b in 310 msec 2024-12-07T00:23:23,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741918_1094 (size=100) 2024-12-07T00:23:23,795 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741918_1094 (size=100) 2024-12-07T00:23:23,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741918_1094 (size=100) 2024-12-07T00:23:23,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:23,798 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=47}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=47 2024-12-07T00:23:23,799 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=47 2024-12-07T00:23:23,799 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,799 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=47, ppid=46, state=RUNNABLE; SnapshotRegionProcedure fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:23,805 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=47, resume processing ppid=46 2024-12-07T00:23:23,806 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=47, ppid=46, state=SUCCESS; SnapshotRegionProcedure fc29d31c074b28990b2a7e371a24dcc7 in 324 msec 2024-12-07T00:23:23,806 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:23:23,807 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute 
state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:23:23,807 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:23:23,807 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-12-07T00:23:23,808 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-07T00:23:23,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741919_1095 (size=600) 2024-12-07T00:23:23,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741919_1095 (size=600) 2024-12-07T00:23:23,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741919_1095 (size=600) 2024-12-07T00:23:23,858 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:23:23,865 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:23:23,865 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-07T00:23:23,867 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=46, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:23:23,867 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 46 2024-12-07T00:23:23,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=46, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=46, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 410 msec 2024-12-07T00:23:24,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=46 2024-12-07T00:23:24,065 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: 
default:testExportWithResetTtl, procId: 46 completed 2024-12-07T00:23:24,079 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079 2024-12-07T00:23:24,079 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46157, tgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079, rawTgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:24,126 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:24,126 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-07T00:23:24,129 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:23:24,136 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-12-07T00:23:24,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741920_1096 (size=143) 2024-12-07T00:23:24,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741920_1096 (size=143) 2024-12-07T00:23:24,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741920_1096 (size=143) 2024-12-07T00:23:24,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741921_1097 (size=600) 2024-12-07T00:23:24,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741921_1097 (size=600) 2024-12-07T00:23:24,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741921_1097 (size=600) 2024-12-07T00:23:24,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741922_1098 (size=141) 2024-12-07T00:23:24,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741922_1098 (size=141) 2024-12-07T00:23:24,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741922_1098 (size=141) 2024-12-07T00:23:24,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:24,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:24,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-07T00:23:24,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-07T00:23:24,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-07T00:23:24,378 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-12-07T00:23:24,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-12-07T00:23:25,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-539788058114673081.jar 2024-12-07T00:23:25,347 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:25,348 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:25,431 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-1357928488222301702.jar 2024-12-07T00:23:25,431 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:25,432 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:25,432 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:25,433 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:25,433 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:25,433 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:25,434 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:23:25,434 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:23:25,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:23:25,435 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:23:25,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:23:25,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:23:25,436 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:23:25,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:23:25,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:23:25,437 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:23:25,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:23:25,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:23:25,438 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:23:25,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:23:25,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:23:25,439 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:23:25,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:23:25,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, 
using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:23:25,440 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:23:25,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741923_1099 (size=127628) 2024-12-07T00:23:25,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741923_1099 (size=127628) 2024-12-07T00:23:25,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741923_1099 (size=127628) 2024-12-07T00:23:25,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741924_1100 (size=2172101) 2024-12-07T00:23:25,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741924_1100 (size=2172101) 2024-12-07T00:23:25,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741924_1100 (size=2172101) 2024-12-07T00:23:25,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741925_1101 (size=213228) 2024-12-07T00:23:25,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741925_1101 (size=213228) 2024-12-07T00:23:25,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741925_1101 (size=213228) 2024-12-07T00:23:25,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741926_1102 (size=1877034) 2024-12-07T00:23:25,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741926_1102 (size=1877034) 2024-12-07T00:23:25,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741926_1102 (size=1877034) 2024-12-07T00:23:25,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741927_1103 (size=533455) 2024-12-07T00:23:25,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741927_1103 (size=533455) 2024-12-07T00:23:25,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741927_1103 (size=533455) 2024-12-07T00:23:25,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741928_1104 (size=7280644) 2024-12-07T00:23:25,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741928_1104 
(size=7280644) 2024-12-07T00:23:25,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741928_1104 (size=7280644) 2024-12-07T00:23:25,644 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0001_000001 (auth:SIMPLE) from 127.0.0.1:37466 2024-12-07T00:23:25,652 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0001/container_1733530972728_0001_01_000001/launch_container.sh] 2024-12-07T00:23:25,652 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0001/container_1733530972728_0001_01_000001/container_tokens] 2024-12-07T00:23:25,652 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0001/container_1733530972728_0001_01_000001/sysfs] 2024-12-07T00:23:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741929_1105 (size=4188619) 2024-12-07T00:23:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741929_1105 (size=4188619) 2024-12-07T00:23:25,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741929_1105 (size=4188619) 2024-12-07T00:23:25,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741930_1106 (size=20406) 2024-12-07T00:23:25,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741930_1106 (size=20406) 2024-12-07T00:23:25,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741930_1106 (size=20406) 2024-12-07T00:23:25,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741931_1107 (size=75495) 2024-12-07T00:23:25,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741931_1107 (size=75495) 2024-12-07T00:23:25,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741931_1107 (size=75495) 2024-12-07T00:23:25,691 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741932_1108 (size=45609) 2024-12-07T00:23:25,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741932_1108 (size=45609) 2024-12-07T00:23:25,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741932_1108 (size=45609) 2024-12-07T00:23:25,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741933_1109 (size=110084) 2024-12-07T00:23:25,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741933_1109 (size=110084) 2024-12-07T00:23:25,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741933_1109 (size=110084) 2024-12-07T00:23:25,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741934_1110 (size=6350154) 2024-12-07T00:23:25,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741934_1110 (size=6350154) 2024-12-07T00:23:25,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741934_1110 (size=6350154) 2024-12-07T00:23:25,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741935_1111 (size=1323991) 2024-12-07T00:23:25,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741935_1111 (size=1323991) 2024-12-07T00:23:25,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741935_1111 (size=1323991) 2024-12-07T00:23:25,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741936_1112 (size=23076) 2024-12-07T00:23:25,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741936_1112 (size=23076) 2024-12-07T00:23:25,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741936_1112 (size=23076) 2024-12-07T00:23:25,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741937_1113 (size=126803) 2024-12-07T00:23:25,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741937_1113 (size=126803) 2024-12-07T00:23:25,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741937_1113 (size=126803) 2024-12-07T00:23:25,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741938_1114 (size=322274) 2024-12-07T00:23:25,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741938_1114 (size=322274) 2024-12-07T00:23:25,809 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741938_1114 (size=322274) 2024-12-07T00:23:25,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741939_1115 (size=451756) 2024-12-07T00:23:25,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741939_1115 (size=451756) 2024-12-07T00:23:25,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741939_1115 (size=451756) 2024-12-07T00:23:25,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741940_1116 (size=1832290) 2024-12-07T00:23:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741940_1116 (size=1832290) 2024-12-07T00:23:25,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741940_1116 (size=1832290) 2024-12-07T00:23:25,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741941_1117 (size=30081) 2024-12-07T00:23:25,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741941_1117 (size=30081) 2024-12-07T00:23:25,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741941_1117 (size=30081) 2024-12-07T00:23:25,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741942_1118 (size=53616) 2024-12-07T00:23:25,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741942_1118 (size=53616) 2024-12-07T00:23:25,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741942_1118 (size=53616) 2024-12-07T00:23:25,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741943_1119 (size=29229) 2024-12-07T00:23:25,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741943_1119 (size=29229) 2024-12-07T00:23:25,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741943_1119 (size=29229) 2024-12-07T00:23:25,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741944_1120 (size=169089) 2024-12-07T00:23:25,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741944_1120 (size=169089) 2024-12-07T00:23:25,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741944_1120 (size=169089) 2024-12-07T00:23:26,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741945_1121 (size=5175431) 2024-12-07T00:23:26,051 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741945_1121 (size=5175431) 2024-12-07T00:23:26,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741945_1121 (size=5175431) 2024-12-07T00:23:26,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741946_1122 (size=136454) 2024-12-07T00:23:26,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741946_1122 (size=136454) 2024-12-07T00:23:26,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741946_1122 (size=136454) 2024-12-07T00:23:26,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741947_1123 (size=907851) 2024-12-07T00:23:26,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741947_1123 (size=907851) 2024-12-07T00:23:26,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741947_1123 (size=907851) 2024-12-07T00:23:26,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741948_1124 (size=3317408) 2024-12-07T00:23:26,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741948_1124 (size=3317408) 2024-12-07T00:23:26,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741948_1124 (size=3317408) 2024-12-07T00:23:26,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741949_1125 (size=503880) 2024-12-07T00:23:26,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741949_1125 (size=503880) 2024-12-07T00:23:26,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741949_1125 (size=503880) 2024-12-07T00:23:26,245 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741950_1126 (size=4695811) 2024-12-07T00:23:26,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741950_1126 (size=4695811) 2024-12-07T00:23:26,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741950_1126 (size=4695811) 2024-12-07T00:23:26,248 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
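The TableMapReduceUtil(923) entries above record the dependency-jar resolution HBase performs before submitting the export job: for each key/value/input/output/partitioner class it locates the containing jar in the local repository and ships it with the job, and the JobResourceUploader warning ("No job jar file set") only means the job jar itself was not set. A minimal sketch of that API, assuming HBase and Hadoop MapReduce on the classpath (the class and job name below are made up for illustration):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsExample {
  // Illustrative only: shows the API behind the "For class ..., using jar ..." lines.
  public static Job newJob() throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-example"); // hypothetical job name
    job.setJarByClass(DependencyJarsExample.class);             // avoids "No job jar file set"
    // Resolves the jar containing each of the job's key/value/format classes and
    // adds them to the job's classpath (the entries logged above).
    TableMapReduceUtil.addDependencyJars(job);
    return job;
  }
}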
2024-12-07T00:23:26,252 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-12-07T00:23:26,255 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T00:23:26,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741951_1127 (size=324) 2024-12-07T00:23:26,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741951_1127 (size=324) 2024-12-07T00:23:26,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741951_1127 (size=324) 2024-12-07T00:23:26,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741952_1128 (size=15) 2024-12-07T00:23:26,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741952_1128 (size=15) 2024-12-07T00:23:26,308 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741952_1128 (size=15) 2024-12-07T00:23:26,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741953_1129 (size=304875) 2024-12-07T00:23:26,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741953_1129 (size=304875) 2024-12-07T00:23:26,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741953_1129 (size=304875) 2024-12-07T00:23:26,382 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:23:26,382 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:23:26,523 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:23:26,637 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0002_000001 (auth:SIMPLE) from 127.0.0.1:37474 2024-12-07T00:23:30,556 INFO [master/6e6711bf2d40:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-12-07T00:23:30,556 INFO [master/6e6711bf2d40:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
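The two capacity.AbstractLeafQueue warnings above indicate that the queue's ApplicationMaster share (maximum-am-resource-percent) is too small to admit even a single AM, so the MiniMRCluster scheduler skips the limit instead of blocking the job. A minimal sketch of how that share is typically raised in a test Configuration, assuming the standard CapacityScheduler property name (the 0.5 value is illustrative, not taken from this run):

import org.apache.hadoop.conf.Configuration;

public class AmResourceShare {
  // Illustrative only: raises the ApplicationMaster resource share the warnings refer to.
  public static Configuration withLargerAmShare(Configuration conf) {
    // CapacityScheduler property; the default is 0.1 (10% of queue resources for AMs).
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    return conf;
  }
}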
2024-12-07T00:23:33,873 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0002_000001 (auth:SIMPLE) from 127.0.0.1:39928 2024-12-07T00:23:34,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741954_1130 (size=350549) 2024-12-07T00:23:34,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741954_1130 (size=350549) 2024-12-07T00:23:34,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741954_1130 (size=350549) 2024-12-07T00:23:36,193 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0002_000001 (auth:SIMPLE) from 127.0.0.1:48056 2024-12-07T00:23:41,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741955_1131 (size=8394) 2024-12-07T00:23:41,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741955_1131 (size=8394) 2024-12-07T00:23:41,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741955_1131 (size=8394) 2024-12-07T00:23:41,407 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741956_1132 (size=5216) 2024-12-07T00:23:41,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741956_1132 (size=5216) 2024-12-07T00:23:41,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741956_1132 (size=5216) 2024-12-07T00:23:41,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741957_1133 (size=17398) 2024-12-07T00:23:41,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741957_1133 (size=17398) 2024-12-07T00:23:41,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741957_1133 (size=17398) 2024-12-07T00:23:41,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741958_1134 (size=461) 2024-12-07T00:23:41,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741958_1134 (size=461) 2024-12-07T00:23:41,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741958_1134 (size=461) 2024-12-07T00:23:41,585 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0002/container_1733530972728_0002_01_000002/launch_container.sh] 2024-12-07T00:23:41,585 WARN 
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0002/container_1733530972728_0002_01_000002/container_tokens] 2024-12-07T00:23:41,585 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0002/container_1733530972728_0002_01_000002/sysfs] 2024-12-07T00:23:41,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741959_1135 (size=17398) 2024-12-07T00:23:41,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741959_1135 (size=17398) 2024-12-07T00:23:41,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741959_1135 (size=17398) 2024-12-07T00:23:41,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741960_1136 (size=350549) 2024-12-07T00:23:41,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741960_1136 (size=350549) 2024-12-07T00:23:41,677 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741960_1136 (size=350549) 2024-12-07T00:23:41,696 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0002_000001 (auth:SIMPLE) from 127.0.0.1:48070 2024-12-07T00:23:42,673 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:23:43,632 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:23:43,633 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
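The ExportSnapshot entries above (loading the hfile list, creating one export split, finalizing, then verifying expiration status and integrity) correspond to a run of the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool. A minimal sketch of a standalone invocation, assuming the tool's usual long options and a hypothetical destination URI; the test itself drives the same class programmatically:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the named snapshot's manifest and hfiles to the destination filesystem.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "snaptb-testExportWithResetTtl",
        "--copy-to", "hdfs://namenode:8020/export-test"   // hypothetical destination
    });
    System.exit(rc);
  }
}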
2024-12-07T00:23:43,641 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb-testExportWithResetTtl 2024-12-07T00:23:43,641 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:23:43,642 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:23:43,642 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-07T00:23:43,643 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-07T00:23:43,643 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-07T00:23:43,643 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-12-07T00:23:43,643 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-12-07T00:23:43,643 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531004079/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-12-07T00:23:43,658 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testExportWithResetTtl 2024-12-07T00:23:43,659 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-12-07T00:23:43,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=49, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testExportWithResetTtl 2024-12-07T00:23:43,675 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531023674"}]},"ts":"1733531023674"} 2024-12-07T00:23:43,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T00:23:43,678 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-07T00:23:43,681 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 
2024-12-07T00:23:43,682 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=50, ppid=49, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-12-07T00:23:43,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=fc29d31c074b28990b2a7e371a24dcc7, UNASSIGN}, {pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=97494782415195dbc1a7bbca0b1fac6b, UNASSIGN}] 2024-12-07T00:23:43,687 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=52, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=97494782415195dbc1a7bbca0b1fac6b, UNASSIGN 2024-12-07T00:23:43,687 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=51, ppid=50, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testExportWithResetTtl, region=fc29d31c074b28990b2a7e371a24dcc7, UNASSIGN 2024-12-07T00:23:43,688 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=97494782415195dbc1a7bbca0b1fac6b, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:43,689 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=fc29d31c074b28990b2a7e371a24dcc7, regionState=CLOSING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:43,691 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:23:43,691 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=53, ppid=52, state=RUNNABLE; CloseRegionProcedure 97494782415195dbc1a7bbca0b1fac6b, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:23:43,694 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:23:43,696 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=54, ppid=51, state=RUNNABLE; CloseRegionProcedure fc29d31c074b28990b2a7e371a24dcc7, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:23:43,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T00:23:43,847 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:43,848 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(124): Close 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:43,849 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:23:43,849 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1681): Closing 97494782415195dbc1a7bbca0b1fac6b, disabling compactions & flushes 2024-12-07T00:23:43,849 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1703): Closing region 
testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:43,849 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:43,849 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. after waiting 0 ms 2024-12-07T00:23:43,849 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:43,849 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:43,850 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(124): Close fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:43,850 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:23:43,850 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1681): Closing fc29d31c074b28990b2a7e371a24dcc7, disabling compactions & flushes 2024-12-07T00:23:43,850 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1703): Closing region testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:43,851 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:43,851 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1791): Acquired close lock on testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. after waiting 0 ms 2024-12-07T00:23:43,851 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1801): Updates disabled for region testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 2024-12-07T00:23:43,885 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T00:23:43,886 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:23:43,886 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1922): Closed testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7. 
2024-12-07T00:23:43,886 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] regionserver.HRegion(1635): Region close journal for fc29d31c074b28990b2a7e371a24dcc7: 2024-12-07T00:23:43,887 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T00:23:43,888 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:23:43,889 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=54}] handler.UnassignRegionHandler(170): Closed fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:43,889 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1922): Closed testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b. 2024-12-07T00:23:43,889 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] regionserver.HRegion(1635): Region close journal for 97494782415195dbc1a7bbca0b1fac6b: 2024-12-07T00:23:43,889 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=51 updating hbase:meta row=fc29d31c074b28990b2a7e371a24dcc7, regionState=CLOSED 2024-12-07T00:23:43,890 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=53}] handler.UnassignRegionHandler(170): Closed 97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:43,891 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=52 updating hbase:meta row=97494782415195dbc1a7bbca0b1fac6b, regionState=CLOSED 2024-12-07T00:23:43,894 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=54, resume processing ppid=51 2024-12-07T00:23:43,901 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=54, ppid=51, state=SUCCESS; CloseRegionProcedure fc29d31c074b28990b2a7e371a24dcc7, server=6e6711bf2d40,43281,1733530964846 in 197 msec 2024-12-07T00:23:43,901 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=53, resume processing ppid=52 2024-12-07T00:23:43,902 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=53, ppid=52, state=SUCCESS; CloseRegionProcedure 97494782415195dbc1a7bbca0b1fac6b, server=6e6711bf2d40,36059,1733530964705 in 202 msec 2024-12-07T00:23:43,902 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=51, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=fc29d31c074b28990b2a7e371a24dcc7, UNASSIGN in 209 msec 2024-12-07T00:23:43,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=52, resume processing ppid=50 2024-12-07T00:23:43,904 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=52, ppid=50, state=SUCCESS; TransitRegionStateProcedure table=testExportWithResetTtl, region=97494782415195dbc1a7bbca0b1fac6b, UNASSIGN in 217 msec 2024-12-07T00:23:43,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=50, resume processing ppid=49 2024-12-07T00:23:43,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=50, ppid=49, state=SUCCESS; 
CloseTableRegionsProcedure table=testExportWithResetTtl in 222 msec 2024-12-07T00:23:43,908 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531023908"}]},"ts":"1733531023908"} 2024-12-07T00:23:43,912 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-07T00:23:43,915 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-12-07T00:23:43,918 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=49, state=SUCCESS; DisableTableProcedure table=testExportWithResetTtl in 257 msec 2024-12-07T00:23:43,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=49 2024-12-07T00:23:43,980 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testExportWithResetTtl, procId: 49 completed 2024-12-07T00:23:43,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-12-07T00:23:43,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T00:23:43,984 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=55, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T00:23:43,984 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testExportWithResetTtl 2024-12-07T00:23:43,987 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-12-07T00:23:43,990 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=55, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T00:23:43,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T00:23:43,991 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T00:23:43,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T00:23:43,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T00:23:43,993 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-07T00:23:43,995 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-07T00:23:43,995 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-07T00:23:43,996 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-12-07T00:23:43,996 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:43,996 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:44,000 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/recovered.edits] 2024-12-07T00:23:44,002 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/recovered.edits] 2024-12-07T00:23:44,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T00:23:44,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T00:23:44,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:44,003 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T00:23:44,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:44,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:44,003 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-12-07T00:23:44,004 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:44,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T00:23:44,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:44,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:44,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:44,006 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:44,014 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/cf/bbf3a37c5dc04febbb6e8b7f125b917e to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/cf/bbf3a37c5dc04febbb6e8b7f125b917e 2024-12-07T00:23:44,015 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/cf/99c1a1fde797405db71f507936615628 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/cf/99c1a1fde797405db71f507936615628 2024-12-07T00:23:44,030 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/recovered.edits/8.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7/recovered.edits/8.seqid 2024-12-07T00:23:44,032 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/fc29d31c074b28990b2a7e371a24dcc7 2024-12-07T00:23:44,039 
DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/recovered.edits/8.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b/recovered.edits/8.seqid 2024-12-07T00:23:44,040 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportWithResetTtl/97494782415195dbc1a7bbca0b1fac6b 2024-12-07T00:23:44,040 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-12-07T00:23:44,043 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=55, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T00:23:44,052 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-12-07T00:23:44,059 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-12-07T00:23:44,060 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=55, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T00:23:44,061 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 2024-12-07T00:23:44,061 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531024061"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:44,061 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531024061"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:44,089 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:23:44,089 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => fc29d31c074b28990b2a7e371a24dcc7, NAME => 'testExportWithResetTtl,,1733531002788.fc29d31c074b28990b2a7e371a24dcc7.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 97494782415195dbc1a7bbca0b1fac6b, NAME => 'testExportWithResetTtl,1,1733531002788.97494782415195dbc1a7bbca0b1fac6b.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:23:44,089 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 
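The disable/delete entries above and below (DisableTableProcedure pid=49 and pid=56, DeleteTableProcedure pid=55, HFileArchiver moving the region directories into the archive before the META rows are removed) are the standard table-removal path driven from the test's cleanup. A minimal sketch of the client-side Admin calls that trigger this sequence, assuming a standard Connection (class and method names below are illustrative, not the test's own code):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableExample {
  // Illustrative only: the client-side calls behind the disable/delete procedures above.
  public static void dropTable(String name) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf(name);
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);  // master runs a DisableTableProcedure, as logged above
      }
      admin.deleteTable(table);     // master runs a DeleteTableProcedure; region dirs are archived
    }
  }
}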
2024-12-07T00:23:44,090 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531024089"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:44,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T00:23:44,110 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testExportWithResetTtl state from META 2024-12-07T00:23:44,114 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=55, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testExportWithResetTtl 2024-12-07T00:23:44,117 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=55, state=SUCCESS; DeleteTableProcedure table=testExportWithResetTtl in 133 msec 2024-12-07T00:23:44,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=55 2024-12-07T00:23:44,310 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testExportWithResetTtl, procId: 55 completed 2024-12-07T00:23:44,311 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithResetTtl 2024-12-07T00:23:44,311 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-12-07T00:23:44,316 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T00:23:44,319 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531024319"}]},"ts":"1733531024319"} 2024-12-07T00:23:44,322 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 2024-12-07T00:23:44,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-07T00:23:44,326 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-12-07T00:23:44,327 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-12-07T00:23:44,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=61c177d8522b4160c7a2d82d13452227, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d76bbe949105469222eaaf6c93c105b5, UNASSIGN}] 2024-12-07T00:23:44,333 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d76bbe949105469222eaaf6c93c105b5, UNASSIGN 2024-12-07T00:23:44,333 INFO [PEWorker-2 
{}] procedure.MasterProcedureScheduler(786): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=61c177d8522b4160c7a2d82d13452227, UNASSIGN 2024-12-07T00:23:44,334 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=d76bbe949105469222eaaf6c93c105b5, regionState=CLOSING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:23:44,335 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=61c177d8522b4160c7a2d82d13452227, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:44,337 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:23:44,337 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=60, ppid=59, state=RUNNABLE; CloseRegionProcedure d76bbe949105469222eaaf6c93c105b5, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:23:44,339 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:23:44,339 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=61, ppid=58, state=RUNNABLE; CloseRegionProcedure 61c177d8522b4160c7a2d82d13452227, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:23:44,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-12-07T00:23:44,424 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-07T00:23:44,492 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:44,492 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:23:44,493 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(124): Close 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:44,493 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:23:44,493 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1681): Closing 61c177d8522b4160c7a2d82d13452227, disabling compactions & flushes 2024-12-07T00:23:44,493 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:44,493 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:44,493 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 
after waiting 0 ms 2024-12-07T00:23:44,493 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(124): Close d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:44,493 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 2024-12-07T00:23:44,493 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:23:44,493 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1681): Closing d76bbe949105469222eaaf6c93c105b5, disabling compactions & flushes 2024-12-07T00:23:44,494 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1703): Closing region testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:44,494 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:44,494 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. after waiting 0 ms 2024-12-07T00:23:44,494 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:44,499 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:23:44,500 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:23:44,500 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:23:44,500 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227. 
2024-12-07T00:23:44,500 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1635): Region close journal for 61c177d8522b4160c7a2d82d13452227: 2024-12-07T00:23:44,501 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:23:44,501 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1922): Closed testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5. 2024-12-07T00:23:44,501 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1635): Region close journal for d76bbe949105469222eaaf6c93c105b5: 2024-12-07T00:23:44,504 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(170): Closed 61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:44,505 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=58 updating hbase:meta row=61c177d8522b4160c7a2d82d13452227, regionState=CLOSED 2024-12-07T00:23:44,505 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(170): Closed d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:44,506 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=59 updating hbase:meta row=d76bbe949105469222eaaf6c93c105b5, regionState=CLOSED 2024-12-07T00:23:44,510 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=61, resume processing ppid=58 2024-12-07T00:23:44,511 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=61, ppid=58, state=SUCCESS; CloseRegionProcedure 61c177d8522b4160c7a2d82d13452227, server=6e6711bf2d40,36059,1733530964705 in 169 msec 2024-12-07T00:23:44,511 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=60, resume processing ppid=59 2024-12-07T00:23:44,512 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=60, ppid=59, state=SUCCESS; CloseRegionProcedure d76bbe949105469222eaaf6c93c105b5, server=6e6711bf2d40,33095,1733530964767 in 172 msec 2024-12-07T00:23:44,513 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=58, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=61c177d8522b4160c7a2d82d13452227, UNASSIGN in 181 msec 2024-12-07T00:23:44,514 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=59, resume processing ppid=57 2024-12-07T00:23:44,514 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=59, ppid=57, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=d76bbe949105469222eaaf6c93c105b5, UNASSIGN in 182 msec 2024-12-07T00:23:44,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=57, resume processing ppid=56 2024-12-07T00:23:44,527 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=57, ppid=56, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 197 msec 2024-12-07T00:23:44,529 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531024529"}]},"ts":"1733531024529"} 2024-12-07T00:23:44,531 INFO 
[PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-12-07T00:23:44,535 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-12-07T00:23:44,539 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=56, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithResetTtl in 226 msec 2024-12-07T00:23:44,625 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=56 2024-12-07T00:23:44,626 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl, procId: 56 completed 2024-12-07T00:23:44,626 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-12-07T00:23:44,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T00:23:44,633 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T00:23:44,634 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithResetTtl 2024-12-07T00:23:44,635 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T00:23:44,637 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-12-07T00:23:44,641 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:44,641 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:44,645 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/recovered.edits] 2024-12-07T00:23:44,646 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/recovered.edits] 2024-12-07T00:23:44,648 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T00:23:44,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T00:23:44,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T00:23:44,650 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T00:23:44,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-07T00:23:44,651 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-12-07T00:23:44,653 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:44,654 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-07T00:23:44,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T00:23:44,654 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-12-07T00:23:44,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:44,654 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T00:23:44,654 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T00:23:44,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:44,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-12-07T00:23:44,654 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:44,655 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-07T00:23:44,657 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/cf/ec3dcf03540b425db7a6e81d4085a652 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/cf/ec3dcf03540b425db7a6e81d4085a652 2024-12-07T00:23:44,660 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/cf/e3488d5b25ab459c96a5777b59dfbf0f to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/cf/e3488d5b25ab459c96a5777b59dfbf0f 2024-12-07T00:23:44,662 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5/recovered.edits/9.seqid 2024-12-07T00:23:44,663 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/d76bbe949105469222eaaf6c93c105b5 2024-12-07T00:23:44,666 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227/recovered.edits/9.seqid 2024-12-07T00:23:44,666 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithResetTtl/61c177d8522b4160c7a2d82d13452227 2024-12-07T00:23:44,666 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-12-07T00:23:44,670 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T00:23:44,679 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-12-07T00:23:44,682 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 
2024-12-07T00:23:44,683 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T00:23:44,683 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-12-07T00:23:44,684 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531024683"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:44,684 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531024683"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:44,687 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:23:44,687 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 61c177d8522b4160c7a2d82d13452227, NAME => 'testtb-testExportWithResetTtl,,1733531001449.61c177d8522b4160c7a2d82d13452227.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => d76bbe949105469222eaaf6c93c105b5, NAME => 'testtb-testExportWithResetTtl,1,1733531001449.d76bbe949105469222eaaf6c93c105b5.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:23:44,687 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 2024-12-07T00:23:44,687 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531024687"}]},"ts":"9223372036854775807"} 2024-12-07T00:23:44,689 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithResetTtl state from META 2024-12-07T00:23:44,692 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-12-07T00:23:44,694 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=62, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithResetTtl in 65 msec 2024-12-07T00:23:44,756 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=62 2024-12-07T00:23:44,757 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl, procId: 62 completed 2024-12-07T00:23:44,770 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" 2024-12-07T00:23:44,772 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-12-07T00:23:44,774 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" 2024-12-07T00:23:44,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb-testExportWithResetTtl 
2024-12-07T00:23:44,779 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" 2024-12-07T00:23:44,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-12-07T00:23:44,808 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithResetTtl Thread=787 (was 771) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41521 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2145 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-16 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-304303176_1 at /127.0.0.1:36458 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:56014 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at 
/127.0.0.1:36390 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41549 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-304303176_1 at /127.0.0.1:43656 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) 
app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34657 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-14 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:52868 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 21607) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (41150622) connection to localhost/127.0.0.1:34657 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=795 (was 794) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=689 (was 643) - SystemLoadAverage LEAK? -, ProcessCount=20 (was 17) - ProcessCount LEAK? -, AvailableMemoryMB=3649 (was 4313) 2024-12-07T00:23:44,809 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=787 is superior to 500 2024-12-07T00:23:44,829 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=787, OpenFileDescriptor=795, MaxFileDescriptor=1048576, SystemLoadAverage=689, ProcessCount=20, AvailableMemoryMB=3646 2024-12-07T00:23:44,829 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=787 is superior to 500 2024-12-07T00:23:44,831 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:23:44,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemState 2024-12-07T00:23:44,834 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:23:44,834 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:44,834 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 63 2024-12-07T00:23:44,840 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; 
CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:23:44,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-07T00:23:44,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741961_1137 (size=407) 2024-12-07T00:23:44,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741961_1137 (size=407) 2024-12-07T00:23:44,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741961_1137 (size=407) 2024-12-07T00:23:44,870 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => b0303470530a23be3a9297d9d8b14e31, NAME => 'testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:44,877 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 0c52a506f9fa43bc2896b702def12312, NAME => 'testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:44,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741962_1138 (size=68) 2024-12-07T00:23:44,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741962_1138 (size=68) 2024-12-07T00:23:44,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741962_1138 (size=68) 2024-12-07T00:23:44,891 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:44,891 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 
b0303470530a23be3a9297d9d8b14e31, disabling compactions & flushes 2024-12-07T00:23:44,891 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:44,891 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:44,891 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. after waiting 0 ms 2024-12-07T00:23:44,891 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:44,891 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:44,891 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for b0303470530a23be3a9297d9d8b14e31: 2024-12-07T00:23:44,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741963_1139 (size=68) 2024-12-07T00:23:44,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741963_1139 (size=68) 2024-12-07T00:23:44,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741963_1139 (size=68) 2024-12-07T00:23:44,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:44,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 0c52a506f9fa43bc2896b702def12312, disabling compactions & flushes 2024-12-07T00:23:44,895 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:44,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:44,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. after waiting 0 ms 2024-12-07T00:23:44,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 
2024-12-07T00:23:44,895 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:44,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 0c52a506f9fa43bc2896b702def12312: 2024-12-07T00:23:44,897 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:23:44,897 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733531024897"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531024897"}]},"ts":"1733531024897"} 2024-12-07T00:23:44,897 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733531024897"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531024897"}]},"ts":"1733531024897"} 2024-12-07T00:23:44,900 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:23:44,904 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:23:44,904 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531024904"}]},"ts":"1733531024904"} 2024-12-07T00:23:44,908 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-12-07T00:23:44,913 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:23:44,915 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:23:44,915 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:23:44,915 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:23:44,915 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:23:44,915 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:23:44,915 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:23:44,915 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:23:44,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b0303470530a23be3a9297d9d8b14e31, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0c52a506f9fa43bc2896b702def12312, ASSIGN}] 
2024-12-07T00:23:44,916 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b0303470530a23be3a9297d9d8b14e31, ASSIGN 2024-12-07T00:23:44,917 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0c52a506f9fa43bc2896b702def12312, ASSIGN 2024-12-07T00:23:44,922 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b0303470530a23be3a9297d9d8b14e31, ASSIGN; state=OFFLINE, location=6e6711bf2d40,43281,1733530964846; forceNewPlan=false, retain=false 2024-12-07T00:23:44,922 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0c52a506f9fa43bc2896b702def12312, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:23:44,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-07T00:23:45,072 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:23:45,073 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=b0303470530a23be3a9297d9d8b14e31, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:45,073 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=0c52a506f9fa43bc2896b702def12312, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:45,075 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=66, ppid=64, state=RUNNABLE; OpenRegionProcedure b0303470530a23be3a9297d9d8b14e31, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:23:45,076 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=67, ppid=65, state=RUNNABLE; OpenRegionProcedure 0c52a506f9fa43bc2896b702def12312, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:23:45,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-07T00:23:45,228 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:45,228 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:45,232 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:45,232 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 
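The assignment subprocedures above (pids 64-67) place one region on 6e6711bf2d40,43281 and the other on 6e6711bf2d40,36059. Purely as an illustration, and not part of this test, a helper that reads the resulting placements back from hbase:meta could be added to the sketch class above:

    // Requires: import org.apache.hadoop.hbase.HRegionLocation;
    //           import org.apache.hadoop.hbase.client.RegionLocator;
    // Sketch: print encoded region name -> hosting region server for the table.
    static void printRegionPlacements(Connection conn, TableName tn) throws Exception {
      try (RegionLocator locator = conn.getRegionLocator(tn)) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
      }
    }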
2024-12-07T00:23:45,232 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7285): Opening region: {ENCODED => b0303470530a23be3a9297d9d8b14e31, NAME => 'testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:23:45,232 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7285): Opening region: {ENCODED => 0c52a506f9fa43bc2896b702def12312, NAME => 'testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:23:45,233 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. service=AccessControlService 2024-12-07T00:23:45,233 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. service=AccessControlService 2024-12-07T00:23:45,233 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:23:45,233 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:23:45,233 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,233 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,234 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:45,234 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:23:45,234 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7327): checking encryption for 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,234 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7327): checking encryption for b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,234 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] 
regionserver.HRegion(7330): checking classloading for 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,234 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7330): checking classloading for b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,235 INFO [StoreOpener-0c52a506f9fa43bc2896b702def12312-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,236 INFO [StoreOpener-b0303470530a23be3a9297d9d8b14e31-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,237 INFO [StoreOpener-0c52a506f9fa43bc2896b702def12312-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0c52a506f9fa43bc2896b702def12312 columnFamilyName cf 2024-12-07T00:23:45,237 DEBUG [StoreOpener-0c52a506f9fa43bc2896b702def12312-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:45,238 INFO [StoreOpener-0c52a506f9fa43bc2896b702def12312-1 {}] regionserver.HStore(327): Store=0c52a506f9fa43bc2896b702def12312/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:23:45,239 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,239 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,242 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1085): writing seq id for 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,245 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(409): Wrote 
file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:23:45,245 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1102): Opened 0c52a506f9fa43bc2896b702def12312; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=73313365, jitterRate=0.09245426952838898}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:23:45,246 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1001): Region open journal for 0c52a506f9fa43bc2896b702def12312: 2024-12-07T00:23:45,247 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312., pid=67, masterSystemTime=1733531025228 2024-12-07T00:23:45,249 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:45,249 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:45,249 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=65 updating hbase:meta row=0c52a506f9fa43bc2896b702def12312, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:45,251 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025 {}] assignment.AssignmentManager(1526): Unable to acquire lock for regionNode state=OPEN, location=6e6711bf2d40,36059,1733530964705, table=testtb-testExportFileSystemState, region=0c52a506f9fa43bc2896b702def12312. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-12-07T00:23:45,253 INFO [StoreOpener-b0303470530a23be3a9297d9d8b14e31-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b0303470530a23be3a9297d9d8b14e31 columnFamilyName cf 2024-12-07T00:23:45,253 DEBUG [StoreOpener-b0303470530a23be3a9297d9d8b14e31-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:23:45,254 INFO [StoreOpener-b0303470530a23be3a9297d9d8b14e31-1 {}] regionserver.HStore(327): Store=b0303470530a23be3a9297d9d8b14e31/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:23:45,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=67, resume processing ppid=65 2024-12-07T00:23:45,254 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=67, ppid=65, state=SUCCESS; OpenRegionProcedure 0c52a506f9fa43bc2896b702def12312, server=6e6711bf2d40,36059,1733530964705 in 175 msec 2024-12-07T00:23:45,255 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,255 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,255 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=65, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0c52a506f9fa43bc2896b702def12312, ASSIGN in 339 msec 2024-12-07T00:23:45,258 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1085): writing seq id for b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,260 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:23:45,261 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1102): Opened b0303470530a23be3a9297d9d8b14e31; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72403052, 
jitterRate=0.07888954877853394}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:23:45,261 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1001): Region open journal for b0303470530a23be3a9297d9d8b14e31: 2024-12-07T00:23:45,264 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31., pid=66, masterSystemTime=1733531025228 2024-12-07T00:23:45,266 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:45,266 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:45,266 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=64 updating hbase:meta row=b0303470530a23be3a9297d9d8b14e31, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:45,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=66, resume processing ppid=64 2024-12-07T00:23:45,269 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=66, ppid=64, state=SUCCESS; OpenRegionProcedure b0303470530a23be3a9297d9d8b14e31, server=6e6711bf2d40,43281,1733530964846 in 192 msec 2024-12-07T00:23:45,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=64, resume processing ppid=63 2024-12-07T00:23:45,271 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=64, ppid=63, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b0303470530a23be3a9297d9d8b14e31, ASSIGN in 354 msec 2024-12-07T00:23:45,272 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:23:45,273 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531025272"}]},"ts":"1733531025272"} 2024-12-07T00:23:45,274 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-12-07T00:23:45,277 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:23:45,278 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-12-07T00:23:45,280 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T00:23:45,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:45,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:45,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:45,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:23:45,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:45,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:45,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:45,285 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T00:23:45,286 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=63, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemState in 453 msec 2024-12-07T00:23:45,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=63 2024-12-07T00:23:45,445 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState, procId: 63 completed 2024-12-07T00:23:45,445 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemState get assigned. Timeout = 60000ms 2024-12-07T00:23:45,445 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:45,449 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemState assigned to meta. Checking AM states. 2024-12-07T00:23:45,450 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:45,450 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemState assigned. 
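Above, table creation (procId 63) completes and the test utility waits up to 60,000 ms for every region of the new table to be assigned before issuing the first snapshot request. A rough client-side equivalent of that wait, sketched here rather than taken from the utility's actual implementation, again reusing the names from the first sketch:

    // Sketch: poll Admin.isTableAvailable for up to ~60s, mirroring the wait above.
    static void waitForTable(Admin admin, TableName tn) throws Exception {
      long deadline = System.currentTimeMillis() + 60_000L;
      while (!admin.isTableAvailable(tn)) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("Table " + tn + " was not available within 60s");
        }
        Thread.sleep(250);
      }
    }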
2024-12-07T00:23:45,456 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T00:23:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531025456 (current time:1733531025456). 2024-12-07T00:23:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:23:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-07T00:23:45,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:23:45,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5938a549 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@157971d 2024-12-07T00:23:45,462 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3c685dae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:45,463 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:45,465 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37156, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:45,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5938a549 to 127.0.0.1:60354 2024-12-07T00:23:45,467 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:45,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x11e0512f to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4a39383 2024-12-07T00:23:45,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a583b94, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:45,480 DEBUG [hconnection-0x2d679e5c-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:45,481 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37166, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:45,484 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:45,485 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52708, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:45,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x11e0512f to 127.0.0.1:60354 2024-12-07T00:23:45,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:45,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T00:23:45,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:23:45,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T00:23:45,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-07T00:23:45,490 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:23:45,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-07T00:23:45,491 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:23:45,496 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:23:45,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741964_1140 (size=170) 2024-12-07T00:23:45,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741964_1140 (size=170) 2024-12-07T00:23:45,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741964_1140 (size=170) 2024-12-07T00:23:45,526 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=68, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:23:45,526 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure b0303470530a23be3a9297d9d8b14e31}, {pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 0c52a506f9fa43bc2896b702def12312}] 2024-12-07T00:23:45,529 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,529 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-07T00:23:45,681 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:45,682 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-12-07T00:23:45,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:45,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2538): Flush status journal for 0c52a506f9fa43bc2896b702def12312: 2024-12-07T00:23:45,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. for emptySnaptb0-testExportFileSystemState completed. 2024-12-07T00:23:45,683 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-07T00:23:45,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:45,684 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:23:45,686 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:45,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-12-07T00:23:45,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:45,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2538): Flush status journal for b0303470530a23be3a9297d9d8b14e31: 2024-12-07T00:23:45,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. for emptySnaptb0-testExportFileSystemState completed. 2024-12-07T00:23:45,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-12-07T00:23:45,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:45,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:23:45,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741965_1141 (size=71) 2024-12-07T00:23:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741965_1141 (size=71) 2024-12-07T00:23:45,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741965_1141 (size=71) 2024-12-07T00:23:45,712 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 
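The SnapshotRegionProcedure entries above store only region-info and an empty hfile list for emptySnaptb0-testExportFileSystemState, since nothing has been written to the table yet. The snapshot itself was requested through the master RPC logged earlier; a hedged sketch of the matching client call, with the Admin handle and TableName reused from the first sketch:

    // Requires: import org.apache.hadoop.hbase.client.SnapshotType;
    // Sketch: request the FLUSH-type snapshot of the still-empty table.
    static void takeEmptySnapshot(Admin admin, TableName tn) throws Exception {
      admin.snapshot("emptySnaptb0-testExportFileSystemState", tn, SnapshotType.FLUSH);
    }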
2024-12-07T00:23:45,712 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-12-07T00:23:45,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=70 2024-12-07T00:23:45,714 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,714 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=70, ppid=68, state=RUNNABLE; SnapshotRegionProcedure 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:45,719 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=70, ppid=68, state=SUCCESS; SnapshotRegionProcedure 0c52a506f9fa43bc2896b702def12312 in 191 msec 2024-12-07T00:23:45,724 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741966_1142 (size=71) 2024-12-07T00:23:45,725 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741966_1142 (size=71) 2024-12-07T00:23:45,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741966_1142 (size=71) 2024-12-07T00:23:45,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:45,729 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-12-07T00:23:45,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=69 2024-12-07T00:23:45,730 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,730 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=69, ppid=68, state=RUNNABLE; SnapshotRegionProcedure b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:45,734 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=69, resume processing ppid=68 2024-12-07T00:23:45,734 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=69, ppid=68, state=SUCCESS; SnapshotRegionProcedure b0303470530a23be3a9297d9d8b14e31 in 205 msec 2024-12-07T00:23:45,734 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:23:45,735 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState 
table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:23:45,735 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:23:45,735 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-12-07T00:23:45,736 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-12-07T00:23:45,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741967_1143 (size=552) 2024-12-07T00:23:45,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741967_1143 (size=552) 2024-12-07T00:23:45,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741967_1143 (size=552) 2024-12-07T00:23:45,777 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:23:45,786 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:23:45,786 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-12-07T00:23:45,792 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:23:45,792 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-12-07T00:23:45,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-07T00:23:45,796 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=68, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportFileSystemState 
table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 305 msec 2024-12-07T00:23:46,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=68 2024-12-07T00:23:46,096 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 68 completed 2024-12-07T00:23:46,105 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:23:46,106 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:23:46,110 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemState 2024-12-07T00:23:46,110 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:23:46,110 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:23:46,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T00:23:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531026124 (current time:1733531026124). 
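The HRegion(8254) warnings above ("writing data ... with WAL disabled") correspond to client writes issued with SKIP_WAL durability. A minimal sketch of such a write follows; the row key and value are invented, and only the table name, family "cf", and qualifier "q" match what appears in this log:

    // Requires: import org.apache.hadoop.hbase.client.Durability;
    //           import org.apache.hadoop.hbase.client.Put;
    //           import org.apache.hadoop.hbase.client.Table;
    // Sketch: a Put that skips the WAL, producing the "WAL disabled" warning above.
    static void putWithoutWal(Connection conn, TableName tn) throws Exception {
      try (Table table = conn.getTable(tn)) {
        Put p = new Put(Bytes.toBytes("row-0001"))   // hypothetical row key
            .setDurability(Durability.SKIP_WAL)
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        table.put(p);
      }
    }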
2024-12-07T00:23:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:23:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-12-07T00:23:46,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:23:46,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1d0573f5 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@c818b77 2024-12-07T00:23:46,128 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c93337a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:46,130 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:46,131 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37176, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1d0573f5 to 127.0.0.1:60354 2024-12-07T00:23:46,132 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:46,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1f319758 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1f6b7dfa 2024-12-07T00:23:46,136 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5aa6858e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:23:46,138 DEBUG [hconnection-0x1de6e2e4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:46,138 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37184, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:46,140 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:23:46,141 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52724, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:23:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x1f319758 to 127.0.0.1:60354 2024-12-07T00:23:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:23:46,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T00:23:46,143 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:23:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T00:23:46,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-07T00:23:46,145 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:23:46,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T00:23:46,146 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:23:46,148 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:23:46,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741968_1144 (size=165) 2024-12-07T00:23:46,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741968_1144 (size=165) 2024-12-07T00:23:46,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741968_1144 (size=165) 2024-12-07T00:23:46,157 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:23:46,157 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure b0303470530a23be3a9297d9d8b14e31}, {pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 
0c52a506f9fa43bc2896b702def12312}] 2024-12-07T00:23:46,158 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:46,158 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:46,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T00:23:46,309 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:23:46,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-12-07T00:23:46,310 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:46,310 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:23:46,310 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2837): Flushing 0c52a506f9fa43bc2896b702def12312 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-12-07T00:23:46,310 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-12-07T00:23:46,311 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 
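The entries above start the region subprocedures for snaptb0-testExportFileSystemState and, because this is a FLUSH-type snapshot over real data, begin flushing memstores before taking hfile references. Once the procedure eventually completes, both snapshots can be confirmed from a client; a small sketch, with the name filter being my own choice rather than anything used by the test:

    // Requires: import java.util.regex.Pattern;
    //           import org.apache.hadoop.hbase.client.SnapshotDescription;
    // Sketch: list the snapshots taken against testtb-testExportFileSystemState.
    static void listTestSnapshots(Admin admin) throws Exception {
      for (SnapshotDescription sd : admin.listSnapshots(Pattern.compile(".*testExportFileSystemState.*"))) {
        System.out.println(sd.getName());
      }
    }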
2024-12-07T00:23:46,311 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2837): Flushing b0303470530a23be3a9297d9d8b14e31 1/1 column families, dataSize=132 B heapSize=544 B 2024-12-07T00:23:46,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/.tmp/cf/19e2bd2ffde74381abbb6bdc3ff0aedf is 71, key is 190a7fc01c12de4ba80292dfbd57dbbe/cf:q/1733531026106/Put/seqid=0 2024-12-07T00:23:46,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741969_1145 (size=8394) 2024-12-07T00:23:46,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741969_1145 (size=8394) 2024-12-07T00:23:46,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741969_1145 (size=8394) 2024-12-07T00:23:46,340 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.13 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/.tmp/cf/19e2bd2ffde74381abbb6bdc3ff0aedf 2024-12-07T00:23:46,343 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/.tmp/cf/9112df4112e74084967bfa110ac31454 is 71, key is 0191240ff7feaaa27c7b49b21f490a49/cf:q/1733531026105/Put/seqid=0 2024-12-07T00:23:46,351 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/.tmp/cf/19e2bd2ffde74381abbb6bdc3ff0aedf as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/cf/19e2bd2ffde74381abbb6bdc3ff0aedf 2024-12-07T00:23:46,359 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/cf/19e2bd2ffde74381abbb6bdc3ff0aedf, entries=48, sequenceid=6, filesize=8.2 K 2024-12-07T00:23:46,360 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3040): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 0c52a506f9fa43bc2896b702def12312 in 50ms, sequenceid=6, compaction requested=false 2024-12-07T00:23:46,360 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-12-07T00:23:46,361 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2538): Flush status journal for 0c52a506f9fa43bc2896b702def12312: 2024-12-07T00:23:46,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. for snaptb0-testExportFileSystemState completed. 2024-12-07T00:23:46,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-07T00:23:46,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:46,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/cf/19e2bd2ffde74381abbb6bdc3ff0aedf] hfiles 2024-12-07T00:23:46,362 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/cf/19e2bd2ffde74381abbb6bdc3ff0aedf for snapshot=snaptb0-testExportFileSystemState 2024-12-07T00:23:46,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741970_1146 (size=5216) 2024-12-07T00:23:46,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741970_1146 (size=5216) 2024-12-07T00:23:46,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741970_1146 (size=5216) 2024-12-07T00:23:46,368 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=132 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/.tmp/cf/9112df4112e74084967bfa110ac31454 2024-12-07T00:23:46,375 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/.tmp/cf/9112df4112e74084967bfa110ac31454 as 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/cf/9112df4112e74084967bfa110ac31454 2024-12-07T00:23:46,382 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/cf/9112df4112e74084967bfa110ac31454, entries=2, sequenceid=6, filesize=5.1 K 2024-12-07T00:23:46,383 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(3040): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for b0303470530a23be3a9297d9d8b14e31 in 72ms, sequenceid=6, compaction requested=false 2024-12-07T00:23:46,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2538): Flush status journal for b0303470530a23be3a9297d9d8b14e31: 2024-12-07T00:23:46,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. for snaptb0-testExportFileSystemState completed. 2024-12-07T00:23:46,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-12-07T00:23:46,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:23:46,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/cf/9112df4112e74084967bfa110ac31454] hfiles 2024-12-07T00:23:46,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/cf/9112df4112e74084967bfa110ac31454 for snapshot=snaptb0-testExportFileSystemState 2024-12-07T00:23:46,389 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741971_1147 (size=110) 2024-12-07T00:23:46,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741971_1147 (size=110) 2024-12-07T00:23:46,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741971_1147 (size=110) 2024-12-07T00:23:46,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:23:46,391 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-12-07T00:23:46,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=73 2024-12-07T00:23:46,391 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:46,392 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=73, ppid=71, state=RUNNABLE; SnapshotRegionProcedure 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:23:46,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=73, ppid=71, state=SUCCESS; SnapshotRegionProcedure 0c52a506f9fa43bc2896b702def12312 in 236 msec 2024-12-07T00:23:46,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741972_1148 (size=110) 2024-12-07T00:23:46,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741972_1148 (size=110) 2024-12-07T00:23:46,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741972_1148 (size=110) 2024-12-07T00:23:46,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 
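The recurring "Checking to see if procedure is done pid=71" entries are the client polling the master for snapshot completion; once the master moves the manifest out of .hbase-snapshot/.tmp (logged a few entries further below), the snapshot becomes visible to clients. A hedged sketch of confirming that from the client side, reusing only the snapshot name and HDFS paths that appear in this log; everything else is illustrative.

    import java.util.List;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public class VerifySnapshotCommitted {  // illustrative name
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // listSnapshots() reports completed snapshots, i.e. those the master has
          // already moved from .hbase-snapshot/.tmp into .hbase-snapshot.
          List<SnapshotDescription> snapshots = admin.listSnapshots();
          boolean committed = snapshots.stream()
              .anyMatch(s -> "snaptb0-testExportFileSystemState".equals(s.getName()));
          System.out.println("snapshot committed: " + committed);
        }

        // The same fact is visible directly on HDFS; this is the directory the
        // master logs when it finalizes the snapshot in this run.
        Path manifestDir = new Path("hdfs://localhost:46157/user/jenkins/test-data/"
            + "2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemState");
        FileSystem fs = manifestDir.getFileSystem(conf);
        System.out.println("manifest dir exists: " + fs.exists(manifestDir));
      }
    }
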
2024-12-07T00:23:46,406 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-12-07T00:23:46,406 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=72 2024-12-07T00:23:46,406 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:46,407 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=72, ppid=71, state=RUNNABLE; SnapshotRegionProcedure b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:23:46,410 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=72, resume processing ppid=71 2024-12-07T00:23:46,410 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=72, ppid=71, state=SUCCESS; SnapshotRegionProcedure b0303470530a23be3a9297d9d8b14e31 in 251 msec 2024-12-07T00:23:46,410 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:23:46,411 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:23:46,412 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:23:46,412 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-12-07T00:23:46,413 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-07T00:23:46,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741973_1149 (size=630) 2024-12-07T00:23:46,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741973_1149 (size=630) 2024-12-07T00:23:46,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741973_1149 (size=630) 2024-12-07T00:23:46,441 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:23:46,448 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T00:23:46,449 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:23:46,449 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-07T00:23:46,451 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:23:46,451 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-12-07T00:23:46,452 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=71, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 308 msec 2024-12-07T00:23:46,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=71 2024-12-07T00:23:46,750 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState, procId: 71 completed 2024-12-07T00:23:46,750 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750 2024-12-07T00:23:46,750 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46157, tgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750, rawTgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:46,783 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:23:46,783 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750, skipTmp=false, 
initialOutputSnapshotDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-07T00:23:46,786 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:23:46,791 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-12-07T00:23:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741975_1151 (size=630) 2024-12-07T00:23:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741975_1151 (size=630) 2024-12-07T00:23:46,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741975_1151 (size=630) 2024-12-07T00:23:46,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741974_1150 (size=165) 2024-12-07T00:23:46,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741974_1150 (size=165) 2024-12-07T00:23:46,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741974_1150 (size=165) 2024-12-07T00:23:46,820 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:46,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:46,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:46,821 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,434 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 37320884d80707cf3a5cc8fc74ed6256 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:23:47,434 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region adc79d1e913b17fcc146c0c4daf0e7f9 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:23:47,434 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 0c52a506f9fa43bc2896b702def12312 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:23:47,435 DEBUG 
[master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region b0303470530a23be3a9297d9d8b14e31 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:23:47,788 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0002_000001 (auth:SIMPLE) from 127.0.0.1:39210 2024-12-07T00:23:47,801 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0002/container_1733530972728_0002_01_000001/launch_container.sh] 2024-12-07T00:23:47,801 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0002/container_1733530972728_0002_01_000001/container_tokens] 2024-12-07T00:23:47,801 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0002/container_1733530972728_0002_01_000001/sysfs] 2024-12-07T00:23:47,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-2189873906057949404.jar 2024-12-07T00:23:47,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,913 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,983 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-13708856733122398802.jar 2024-12-07T00:23:47,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,984 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T00:23:47,985 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:23:47,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:23:47,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:23:47,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:23:47,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:23:47,986 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:23:47,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:23:47,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:23:47,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:23:47,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:23:47,987 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:23:47,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:23:47,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:23:47,988 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:23:47,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:23:47,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:23:47,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:23:47,989 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:23:47,990 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:23:48,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741976_1152 (size=127628) 2024-12-07T00:23:48,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741976_1152 (size=127628) 2024-12-07T00:23:48,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741976_1152 (size=127628) 2024-12-07T00:23:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741977_1153 (size=2172101) 2024-12-07T00:23:48,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741977_1153 (size=2172101) 2024-12-07T00:23:48,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741977_1153 (size=2172101) 2024-12-07T00:23:48,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741978_1154 (size=213228) 2024-12-07T00:23:48,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741978_1154 (size=213228) 2024-12-07T00:23:48,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741978_1154 (size=213228) 2024-12-07T00:23:48,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741979_1155 (size=1877034) 2024-12-07T00:23:48,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741979_1155 (size=1877034) 2024-12-07T00:23:48,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741979_1155 (size=1877034) 2024-12-07T00:23:48,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741980_1156 (size=533455) 2024-12-07T00:23:48,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741980_1156 (size=533455) 2024-12-07T00:23:48,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741980_1156 (size=533455) 2024-12-07T00:23:48,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741981_1157 (size=7280644) 2024-12-07T00:23:48,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741981_1157 (size=7280644) 2024-12-07T00:23:48,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741981_1157 (size=7280644) 2024-12-07T00:23:48,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741982_1158 
(size=4188619) 2024-12-07T00:23:48,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741982_1158 (size=4188619) 2024-12-07T00:23:48,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741982_1158 (size=4188619) 2024-12-07T00:23:48,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741983_1159 (size=20406) 2024-12-07T00:23:48,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741983_1159 (size=20406) 2024-12-07T00:23:48,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741983_1159 (size=20406) 2024-12-07T00:23:48,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741984_1160 (size=75495) 2024-12-07T00:23:48,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741984_1160 (size=75495) 2024-12-07T00:23:48,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741984_1160 (size=75495) 2024-12-07T00:23:48,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741985_1161 (size=45609) 2024-12-07T00:23:48,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741985_1161 (size=45609) 2024-12-07T00:23:48,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741985_1161 (size=45609) 2024-12-07T00:23:48,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741986_1162 (size=110084) 2024-12-07T00:23:48,279 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741986_1162 (size=110084) 2024-12-07T00:23:48,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741986_1162 (size=110084) 2024-12-07T00:23:48,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741987_1163 (size=1323991) 2024-12-07T00:23:48,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741987_1163 (size=1323991) 2024-12-07T00:23:48,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741987_1163 (size=1323991) 2024-12-07T00:23:48,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741988_1164 (size=23076) 2024-12-07T00:23:48,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741988_1164 (size=23076) 2024-12-07T00:23:48,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741988_1164 
(size=23076) 2024-12-07T00:23:48,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741989_1165 (size=126803) 2024-12-07T00:23:48,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741989_1165 (size=126803) 2024-12-07T00:23:48,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741989_1165 (size=126803) 2024-12-07T00:23:48,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741990_1166 (size=322274) 2024-12-07T00:23:48,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741990_1166 (size=322274) 2024-12-07T00:23:48,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741990_1166 (size=322274) 2024-12-07T00:23:48,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741991_1167 (size=1832290) 2024-12-07T00:23:48,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741991_1167 (size=1832290) 2024-12-07T00:23:48,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741991_1167 (size=1832290) 2024-12-07T00:23:48,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741992_1168 (size=30081) 2024-12-07T00:23:48,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741992_1168 (size=30081) 2024-12-07T00:23:48,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741992_1168 (size=30081) 2024-12-07T00:23:48,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741993_1169 (size=53616) 2024-12-07T00:23:48,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741993_1169 (size=53616) 2024-12-07T00:23:48,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741993_1169 (size=53616) 2024-12-07T00:23:48,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741994_1170 (size=451756) 2024-12-07T00:23:48,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741994_1170 (size=451756) 2024-12-07T00:23:48,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741994_1170 (size=451756) 2024-12-07T00:23:48,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741995_1171 (size=29229) 2024-12-07T00:23:48,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741995_1171 
(size=29229) 2024-12-07T00:23:48,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741995_1171 (size=29229) 2024-12-07T00:23:48,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741996_1172 (size=169089) 2024-12-07T00:23:48,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741996_1172 (size=169089) 2024-12-07T00:23:48,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741996_1172 (size=169089) 2024-12-07T00:23:48,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741997_1173 (size=5175431) 2024-12-07T00:23:48,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741997_1173 (size=5175431) 2024-12-07T00:23:48,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741997_1173 (size=5175431) 2024-12-07T00:23:48,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741998_1174 (size=136454) 2024-12-07T00:23:48,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741998_1174 (size=136454) 2024-12-07T00:23:48,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741998_1174 (size=136454) 2024-12-07T00:23:48,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741999_1175 (size=907851) 2024-12-07T00:23:48,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741999_1175 (size=907851) 2024-12-07T00:23:48,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741999_1175 (size=907851) 2024-12-07T00:23:48,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742000_1176 (size=3317408) 2024-12-07T00:23:48,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742000_1176 (size=3317408) 2024-12-07T00:23:48,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742000_1176 (size=3317408) 2024-12-07T00:23:48,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742001_1177 (size=6350154) 2024-12-07T00:23:48,566 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742001_1177 (size=6350154) 2024-12-07T00:23:48,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742001_1177 (size=6350154) 2024-12-07T00:23:48,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to 
blk_1073742002_1178 (size=503880) 2024-12-07T00:23:48,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742002_1178 (size=503880) 2024-12-07T00:23:48,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742002_1178 (size=503880) 2024-12-07T00:23:48,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742003_1179 (size=4695811) 2024-12-07T00:23:48,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742003_1179 (size=4695811) 2024-12-07T00:23:48,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742003_1179 (size=4695811) 2024-12-07T00:23:48,615 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-07T00:23:48,619 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-12-07T00:23:48,621 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T00:23:48,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742004_1180 (size=344) 2024-12-07T00:23:48,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742004_1180 (size=344) 2024-12-07T00:23:48,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742004_1180 (size=344) 2024-12-07T00:23:48,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742005_1181 (size=15) 2024-12-07T00:23:48,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742005_1181 (size=15) 2024-12-07T00:23:48,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742005_1181 (size=15) 2024-12-07T00:23:48,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742006_1182 (size=304891) 2024-12-07T00:23:48,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742006_1182 (size=304891) 2024-12-07T00:23:48,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742006_1182 (size=304891) 2024-12-07T00:23:48,730 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:23:48,730 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T00:23:48,786 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0003_000001 (auth:SIMPLE) from 127.0.0.1:39220 2024-12-07T00:23:49,425 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:23:54,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-07T00:23:54,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-07T00:23:54,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-12-07T00:23:55,089 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0003_000001 (auth:SIMPLE) from 127.0.0.1:57716 2024-12-07T00:23:55,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742007_1183 (size=350565) 2024-12-07T00:23:55,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742007_1183 (size=350565) 2024-12-07T00:23:55,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742007_1183 (size=350565) 2024-12-07T00:23:57,407 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0003_000001 (auth:SIMPLE) from 127.0.0.1:45624 2024-12-07T00:23:59,882 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:24:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742008_1184 (size=8394) 2024-12-07T00:24:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742008_1184 (size=8394) 2024-12-07T00:24:02,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742008_1184 (size=8394) 2024-12-07T00:24:02,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742009_1185 (size=5216) 2024-12-07T00:24:02,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742009_1185 (size=5216) 2024-12-07T00:24:02,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742009_1185 (size=5216) 2024-12-07T00:24:02,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742010_1186 (size=17422) 2024-12-07T00:24:02,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33385 is added to blk_1073742010_1186 (size=17422) 2024-12-07T00:24:02,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742010_1186 (size=17422) 2024-12-07T00:24:02,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742011_1187 (size=465) 2024-12-07T00:24:02,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742011_1187 (size=465) 2024-12-07T00:24:02,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742011_1187 (size=465) 2024-12-07T00:24:03,011 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733530972728_0003/container_1733530972728_0003_01_000002/launch_container.sh] 2024-12-07T00:24:03,011 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733530972728_0003/container_1733530972728_0003_01_000002/container_tokens] 2024-12-07T00:24:03,011 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733530972728_0003/container_1733530972728_0003_01_000002/sysfs] 2024-12-07T00:24:03,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742012_1188 (size=17422) 2024-12-07T00:24:03,059 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742012_1188 (size=17422) 2024-12-07T00:24:03,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742012_1188 (size=17422) 2024-12-07T00:24:03,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742013_1189 (size=350565) 2024-12-07T00:24:03,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742013_1189 (size=350565) 2024-12-07T00:24:03,103 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742013_1189 (size=350565) 2024-12-07T00:24:03,122 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0003_000001 (auth:SIMPLE) from 127.0.0.1:47840 2024-12-07T00:24:04,972 
INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:24:04,975 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T00:24:05,047 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemState 2024-12-07T00:24:05,047 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:24:05,048 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:24:05,048 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-07T00:24:05,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-07T00:24:05,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-07T00:24:05,050 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-12-07T00:24:05,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-12-07T00:24:05,051 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531026750/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-12-07T00:24:05,067 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemState 2024-12-07T00:24:05,069 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-12-07T00:24:05,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=74, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemState 2024-12-07T00:24:05,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-07T00:24:05,072 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531045072"}]},"ts":"1733531045072"} 2024-12-07T00:24:05,074 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-12-07T00:24:05,076 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-12-07T00:24:05,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-12-07T00:24:05,078 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b0303470530a23be3a9297d9d8b14e31, UNASSIGN}, {pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0c52a506f9fa43bc2896b702def12312, UNASSIGN}] 2024-12-07T00:24:05,079 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=77, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0c52a506f9fa43bc2896b702def12312, UNASSIGN 2024-12-07T00:24:05,079 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=76, ppid=75, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b0303470530a23be3a9297d9d8b14e31, UNASSIGN 2024-12-07T00:24:05,080 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=0c52a506f9fa43bc2896b702def12312, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:05,080 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=b0303470530a23be3a9297d9d8b14e31, regionState=CLOSING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:24:05,081 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:24:05,082 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=78, ppid=77, state=RUNNABLE; CloseRegionProcedure 0c52a506f9fa43bc2896b702def12312, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:24:05,082 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:24:05,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=79, ppid=76, state=RUNNABLE; CloseRegionProcedure b0303470530a23be3a9297d9d8b14e31, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:24:05,174 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-07T00:24:05,234 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:05,235 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(124): Close 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:24:05,235 DEBUG [RSProcedureDispatcher-pool-2 {}] 
master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:24:05,235 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:24:05,235 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1681): Closing 0c52a506f9fa43bc2896b702def12312, disabling compactions & flushes 2024-12-07T00:24:05,235 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:24:05,236 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:24:05,236 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. after waiting 0 ms 2024-12-07T00:24:05,236 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:24:05,236 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(124): Close b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:24:05,236 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:24:05,236 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1681): Closing b0303470530a23be3a9297d9d8b14e31, disabling compactions & flushes 2024-12-07T00:24:05,236 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:24:05,236 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 2024-12-07T00:24:05,236 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. after waiting 0 ms 2024-12-07T00:24:05,236 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 
2024-12-07T00:24:05,313 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:24:05,315 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:24:05,315 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312. 2024-12-07T00:24:05,315 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] regionserver.HRegion(1635): Region close journal for 0c52a506f9fa43bc2896b702def12312: 2024-12-07T00:24:05,318 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=78}] handler.UnassignRegionHandler(170): Closed 0c52a506f9fa43bc2896b702def12312 2024-12-07T00:24:05,319 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=77 updating hbase:meta row=0c52a506f9fa43bc2896b702def12312, regionState=CLOSED 2024-12-07T00:24:05,322 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=78, resume processing ppid=77 2024-12-07T00:24:05,323 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=78, ppid=77, state=SUCCESS; CloseRegionProcedure 0c52a506f9fa43bc2896b702def12312, server=6e6711bf2d40,36059,1733530964705 in 239 msec 2024-12-07T00:24:05,323 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=77, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=0c52a506f9fa43bc2896b702def12312, UNASSIGN in 244 msec 2024-12-07T00:24:05,339 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:24:05,340 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:24:05,340 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31. 
2024-12-07T00:24:05,340 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] regionserver.HRegion(1635): Region close journal for b0303470530a23be3a9297d9d8b14e31: 2024-12-07T00:24:05,342 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=79}] handler.UnassignRegionHandler(170): Closed b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:24:05,343 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=76 updating hbase:meta row=b0303470530a23be3a9297d9d8b14e31, regionState=CLOSED 2024-12-07T00:24:05,346 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=79, resume processing ppid=76 2024-12-07T00:24:05,346 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=79, ppid=76, state=SUCCESS; CloseRegionProcedure b0303470530a23be3a9297d9d8b14e31, server=6e6711bf2d40,43281,1733530964846 in 261 msec 2024-12-07T00:24:05,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=76, resume processing ppid=75 2024-12-07T00:24:05,349 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=76, ppid=75, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=b0303470530a23be3a9297d9d8b14e31, UNASSIGN in 268 msec 2024-12-07T00:24:05,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=75, resume processing ppid=74 2024-12-07T00:24:05,351 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=75, ppid=74, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 272 msec 2024-12-07T00:24:05,353 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531045352"}]},"ts":"1733531045352"} 2024-12-07T00:24:05,354 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-12-07T00:24:05,357 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-12-07T00:24:05,359 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=74, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemState in 288 msec 2024-12-07T00:24:05,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=74 2024-12-07T00:24:05,376 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState, procId: 74 completed 2024-12-07T00:24:05,377 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-12-07T00:24:05,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T00:24:05,379 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=80, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T00:24:05,380 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table 
testtb-testExportFileSystemState 2024-12-07T00:24:05,381 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=80, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T00:24:05,383 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemState 2024-12-07T00:24:05,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T00:24:05,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T00:24:05,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T00:24:05,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T00:24:05,388 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-07T00:24:05,388 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-07T00:24:05,389 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-07T00:24:05,390 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-12-07T00:24:05,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T00:24:05,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T00:24:05,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:05,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:05,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, 
quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T00:24:05,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:05,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-12-07T00:24:05,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:05,393 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-07T00:24:05,396 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:24:05,407 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/recovered.edits] 2024-12-07T00:24:05,408 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312 2024-12-07T00:24:05,414 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/recovered.edits] 2024-12-07T00:24:05,415 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/cf/9112df4112e74084967bfa110ac31454 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/cf/9112df4112e74084967bfa110ac31454 2024-12-07T00:24:05,420 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31/recovered.edits/9.seqid 
2024-12-07T00:24:05,420 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/cf/19e2bd2ffde74381abbb6bdc3ff0aedf to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/cf/19e2bd2ffde74381abbb6bdc3ff0aedf 2024-12-07T00:24:05,421 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/b0303470530a23be3a9297d9d8b14e31 2024-12-07T00:24:05,425 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312/recovered.edits/9.seqid 2024-12-07T00:24:05,426 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemState/0c52a506f9fa43bc2896b702def12312 2024-12-07T00:24:05,426 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-12-07T00:24:05,429 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=80, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T00:24:05,448 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-12-07T00:24:05,464 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-12-07T00:24:05,465 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=80, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T00:24:05,466 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 
2024-12-07T00:24:05,466 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531045466"}]},"ts":"9223372036854775807"} 2024-12-07T00:24:05,466 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531045466"}]},"ts":"9223372036854775807"} 2024-12-07T00:24:05,491 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:24:05,491 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => b0303470530a23be3a9297d9d8b14e31, NAME => 'testtb-testExportFileSystemState,,1733531024831.b0303470530a23be3a9297d9d8b14e31.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 0c52a506f9fa43bc2896b702def12312, NAME => 'testtb-testExportFileSystemState,1,1733531024831.0c52a506f9fa43bc2896b702def12312.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:24:05,491 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 2024-12-07T00:24:05,491 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531045491"}]},"ts":"9223372036854775807"} 2024-12-07T00:24:05,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-07T00:24:05,498 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemState state from META 2024-12-07T00:24:05,501 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=80, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-12-07T00:24:05,503 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=80, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemState in 123 msec 2024-12-07T00:24:05,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=80 2024-12-07T00:24:05,696 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState, procId: 80 completed 2024-12-07T00:24:05,707 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" 2024-12-07T00:24:05,709 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-12-07T00:24:05,711 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" 2024-12-07T00:24:05,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemState 2024-12-07T00:24:05,738 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemState Thread=787 (was 787), 
OpenFileDescriptor=790 (was 795), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=667 (was 689), ProcessCount=20 (was 20), AvailableMemoryMB=3399 (was 3646) 2024-12-07T00:24:05,739 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=787 is superior to 500 2024-12-07T00:24:05,756 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=787, OpenFileDescriptor=790, MaxFileDescriptor=1048576, SystemLoadAverage=667, ProcessCount=20, AvailableMemoryMB=3398 2024-12-07T00:24:05,757 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=787 is superior to 500 2024-12-07T00:24:05,758 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:24:05,759 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testConsecutiveExports 2024-12-07T00:24:05,761 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:24:05,762 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:05,762 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 81 2024-12-07T00:24:05,762 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:24:05,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T00:24:05,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742014_1190 (size=404) 2024-12-07T00:24:05,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742014_1190 (size=404) 2024-12-07T00:24:05,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742014_1190 (size=404) 2024-12-07T00:24:05,776 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 91bed0da05e8fe23662227b048d2ad99, NAME => 'testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, 
{NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:05,779 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 8b0383ac081a50d6229b6c7be02fcc0d, NAME => 'testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742016_1192 (size=65) 2024-12-07T00:24:05,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742016_1192 (size=65) 2024-12-07T00:24:05,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742015_1191 (size=65) 2024-12-07T00:24:05,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742015_1191 (size=65) 2024-12-07T00:24:05,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742015_1191 (size=65) 2024-12-07T00:24:05,817 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:05,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742016_1192 (size=65) 2024-12-07T00:24:05,817 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1681): Closing 91bed0da05e8fe23662227b048d2ad99, disabling compactions & flushes 2024-12-07T00:24:05,817 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:05,817 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:05,817 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 
after waiting 0 ms 2024-12-07T00:24:05,817 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:05,817 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:05,817 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1635): Region close journal for 91bed0da05e8fe23662227b048d2ad99: 2024-12-07T00:24:05,818 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:05,818 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1681): Closing 8b0383ac081a50d6229b6c7be02fcc0d, disabling compactions & flushes 2024-12-07T00:24:05,818 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:05,818 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:05,818 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. after waiting 0 ms 2024-12-07T00:24:05,818 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:05,818 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:05,818 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1635): Region close journal for 8b0383ac081a50d6229b6c7be02fcc0d: 2024-12-07T00:24:05,826 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:24:05,826 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733531045826"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531045826"}]},"ts":"1733531045826"} 2024-12-07T00:24:05,827 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733531045826"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531045826"}]},"ts":"1733531045826"} 2024-12-07T00:24:05,833 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 
2024-12-07T00:24:05,836 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:24:05,836 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531045836"}]},"ts":"1733531045836"} 2024-12-07T00:24:05,838 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-12-07T00:24:05,842 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:24:05,844 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:24:05,844 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:24:05,844 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:24:05,844 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:24:05,844 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:24:05,844 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:24:05,844 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:24:05,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91bed0da05e8fe23662227b048d2ad99, ASSIGN}, {pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8b0383ac081a50d6229b6c7be02fcc0d, ASSIGN}] 2024-12-07T00:24:05,846 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91bed0da05e8fe23662227b048d2ad99, ASSIGN 2024-12-07T00:24:05,846 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8b0383ac081a50d6229b6c7be02fcc0d, ASSIGN 2024-12-07T00:24:05,847 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=82, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91bed0da05e8fe23662227b048d2ad99, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:24:05,848 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=83, ppid=81, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8b0383ac081a50d6229b6c7be02fcc0d, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:24:05,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=81 2024-12-07T00:24:05,998 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:24:05,998 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=8b0383ac081a50d6229b6c7be02fcc0d, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:05,998 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=91bed0da05e8fe23662227b048d2ad99, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:06,001 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=84, ppid=82, state=RUNNABLE; OpenRegionProcedure 91bed0da05e8fe23662227b048d2ad99, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:24:06,002 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=85, ppid=83, state=RUNNABLE; OpenRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:24:06,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T00:24:06,153 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:06,156 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:06,159 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:06,159 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7285): Opening region: {ENCODED => 91bed0da05e8fe23662227b048d2ad99, NAME => 'testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:24:06,160 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. service=AccessControlService 2024-12-07T00:24:06,161 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:24:06,161 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,161 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:06,161 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7327): checking encryption for 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,161 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(7330): checking classloading for 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,172 INFO [StoreOpener-91bed0da05e8fe23662227b048d2ad99-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,175 INFO [StoreOpener-91bed0da05e8fe23662227b048d2ad99-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 91bed0da05e8fe23662227b048d2ad99 columnFamilyName cf 2024-12-07T00:24:06,175 DEBUG [StoreOpener-91bed0da05e8fe23662227b048d2ad99-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:06,176 INFO [StoreOpener-91bed0da05e8fe23662227b048d2ad99-1 {}] regionserver.HStore(327): Store=91bed0da05e8fe23662227b048d2ad99/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:24:06,177 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(135): Open testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:06,177 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7285): Opening region: {ENCODED => 8b0383ac081a50d6229b6c7be02fcc0d, NAME => 'testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:24:06,177 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 
service=AccessControlService 2024-12-07T00:24:06,178 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:24:06,178 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,178 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(894): Instantiated testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:06,178 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7327): checking encryption for 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,178 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(7330): checking classloading for 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,179 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,179 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,180 INFO [StoreOpener-8b0383ac081a50d6229b6c7be02fcc0d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,182 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1085): writing seq id for 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,184 INFO [StoreOpener-8b0383ac081a50d6229b6c7be02fcc0d-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8b0383ac081a50d6229b6c7be02fcc0d columnFamilyName cf 2024-12-07T00:24:06,184 DEBUG [StoreOpener-8b0383ac081a50d6229b6c7be02fcc0d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:06,185 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:24:06,185 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1102): Opened 91bed0da05e8fe23662227b048d2ad99; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67488206, jitterRate=0.005652636289596558}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:24:06,186 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegion(1001): Region open journal for 91bed0da05e8fe23662227b048d2ad99: 2024-12-07T00:24:06,187 INFO [StoreOpener-8b0383ac081a50d6229b6c7be02fcc0d-1 {}] regionserver.HStore(327): Store=8b0383ac081a50d6229b6c7be02fcc0d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:24:06,187 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99., pid=84, masterSystemTime=1733531046153 2024-12-07T00:24:06,187 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,188 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,189 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:06,189 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=84}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 
2024-12-07T00:24:06,190 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=82 updating hbase:meta row=91bed0da05e8fe23662227b048d2ad99, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:06,192 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1085): writing seq id for 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=84, resume processing ppid=82 2024-12-07T00:24:06,193 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=84, ppid=82, state=SUCCESS; OpenRegionProcedure 91bed0da05e8fe23662227b048d2ad99, server=6e6711bf2d40,36059,1733530964705 in 190 msec 2024-12-07T00:24:06,194 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=82, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91bed0da05e8fe23662227b048d2ad99, ASSIGN in 349 msec 2024-12-07T00:24:06,205 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:24:06,206 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1102): Opened 8b0383ac081a50d6229b6c7be02fcc0d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64204137, jitterRate=-0.04328380525112152}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:24:06,206 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegion(1001): Region open journal for 8b0383ac081a50d6229b6c7be02fcc0d: 2024-12-07T00:24:06,207 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d., pid=85, masterSystemTime=1733531046156 2024-12-07T00:24:06,208 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:06,209 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=85}] handler.AssignRegionHandler(164): Opened testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 
2024-12-07T00:24:06,209 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=83 updating hbase:meta row=8b0383ac081a50d6229b6c7be02fcc0d, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:06,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=85, resume processing ppid=83 2024-12-07T00:24:06,212 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=85, ppid=83, state=SUCCESS; OpenRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d, server=6e6711bf2d40,33095,1733530964767 in 208 msec 2024-12-07T00:24:06,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=83, resume processing ppid=81 2024-12-07T00:24:06,222 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=83, ppid=81, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8b0383ac081a50d6229b6c7be02fcc0d, ASSIGN in 368 msec 2024-12-07T00:24:06,223 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:24:06,223 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531046223"}]},"ts":"1733531046223"} 2024-12-07T00:24:06,225 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-12-07T00:24:06,228 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=81, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:24:06,228 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-12-07T00:24:06,230 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-07T00:24:06,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:06,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:06,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:06,232 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:06,234 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 
2024-12-07T00:24:06,234 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:06,235 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:06,235 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:06,236 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=81, state=SUCCESS; CreateTableProcedure table=testtb-testConsecutiveExports in 476 msec 2024-12-07T00:24:06,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=81 2024-12-07T00:24:06,367 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports, procId: 81 completed 2024-12-07T00:24:06,367 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testConsecutiveExports get assigned. Timeout = 60000ms 2024-12-07T00:24:06,368 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:24:06,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testConsecutiveExports assigned to meta. Checking AM states. 2024-12-07T00:24:06,373 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:24:06,373 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testConsecutiveExports assigned. 2024-12-07T00:24:06,380 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-07T00:24:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531046380 (current time:1733531046380). 
2024-12-07T00:24:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:24:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-07T00:24:06,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:24:06,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x01db33bf to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fba0c3e 2024-12-07T00:24:06,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2cc24c74, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:24:06,413 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:06,414 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60790, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:06,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x01db33bf to 127.0.0.1:60354 2024-12-07T00:24:06,416 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:24:06,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3d8a6658 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2fb8c665 2024-12-07T00:24:06,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11278e0a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:24:06,464 DEBUG [hconnection-0x7150051e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:06,466 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60792, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:06,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:06,471 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49272, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:06,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x3d8a6658 to 127.0.0.1:60354 2024-12-07T00:24:06,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:24:06,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-07T00:24:06,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:24:06,474 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=86, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-07T00:24:06,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-07T00:24:06,476 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:24:06,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-07T00:24:06,477 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:24:06,480 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:24:06,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742017_1193 (size=161) 2024-12-07T00:24:06,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742017_1193 (size=161) 2024-12-07T00:24:06,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742017_1193 (size=161) 2024-12-07T00:24:06,507 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:24:06,507 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 91bed0da05e8fe23662227b048d2ad99}, {pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d}] 
2024-12-07T00:24:06,511 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,511 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-07T00:24:06,663 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:06,663 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:06,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=87 2024-12-07T00:24:06,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:06,664 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=88 2024-12-07T00:24:06,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.HRegion(2538): Flush status journal for 91bed0da05e8fe23662227b048d2ad99: 2024-12-07T00:24:06,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. for emptySnaptb0-testConsecutiveExports completed. 2024-12-07T00:24:06,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-07T00:24:06,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:24:06,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 
2024-12-07T00:24:06,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:24:06,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.HRegion(2538): Flush status journal for 8b0383ac081a50d6229b6c7be02fcc0d: 2024-12-07T00:24:06,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. for emptySnaptb0-testConsecutiveExports completed. 2024-12-07T00:24:06,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-12-07T00:24:06,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:24:06,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:24:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742018_1194 (size=68) 2024-12-07T00:24:06,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742019_1195 (size=68) 2024-12-07T00:24:06,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742019_1195 (size=68) 2024-12-07T00:24:06,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742018_1194 (size=68) 2024-12-07T00:24:06,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:06,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 
2024-12-07T00:24:06,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742019_1195 (size=68) 2024-12-07T00:24:06,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=87}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=87 2024-12-07T00:24:06,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=88}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=88 2024-12-07T00:24:06,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742018_1194 (size=68) 2024-12-07T00:24:06,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=87 2024-12-07T00:24:06,683 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=88 2024-12-07T00:24:06,683 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,683 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,683 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=88, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,683 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=87, ppid=86, state=RUNNABLE; SnapshotRegionProcedure 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=88, ppid=86, state=SUCCESS; SnapshotRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d in 177 msec 2024-12-07T00:24:06,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=87, resume processing ppid=86 2024-12-07T00:24:06,688 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=87, ppid=86, state=SUCCESS; SnapshotRegionProcedure 91bed0da05e8fe23662227b048d2ad99 in 177 msec 2024-12-07T00:24:06,688 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:24:06,689 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:24:06,689 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:24:06,690 DEBUG [PEWorker-5 {}] 
snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-12-07T00:24:06,691 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-12-07T00:24:06,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742020_1196 (size=543) 2024-12-07T00:24:06,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742020_1196 (size=543) 2024-12-07T00:24:06,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742020_1196 (size=543) 2024-12-07T00:24:06,707 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:24:06,712 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:24:06,713 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-12-07T00:24:06,715 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=86, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:24:06,715 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 86 2024-12-07T00:24:06,717 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=86, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=86, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 242 msec 2024-12-07T00:24:06,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=86 2024-12-07T00:24:06,781 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 86 completed 2024-12-07T00:24:06,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. with WAL disabled. Data may be lost in the event of a crash. 
2024-12-07T00:24:06,799 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33095 {}] regionserver.HRegion(8254): writing data to region testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:24:06,806 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testConsecutiveExports 2024-12-07T00:24:06,806 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:06,806 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:24:06,821 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-07T00:24:06,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531046821 (current time:1733531046821). 2024-12-07T00:24:06,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:24:06,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-12-07T00:24:06,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:24:06,822 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x5b1ef4b9 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@2a176231 2024-12-07T00:24:06,829 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5100aec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:24:06,831 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:06,833 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60808, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:06,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x5b1ef4b9 to 127.0.0.1:60354 2024-12-07T00:24:06,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:24:06,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7c2b8516 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6d3c6648 2024-12-07T00:24:06,848 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@687e4669, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:24:06,850 DEBUG [hconnection-0x58369bca-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:06,851 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:06,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:06,855 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49282, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:06,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7c2b8516 to 127.0.0.1:60354 2024-12-07T00:24:06,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:24:06,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-12-07T00:24:06,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-07T00:24:06,858 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=89, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-12-07T00:24:06,859 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-07T00:24:06,860 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T00:24:06,863 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:24:06,865 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:24:06,869 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:24:06,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742021_1197 (size=156) 2024-12-07T00:24:06,902 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742021_1197 (size=156) 2024-12-07T00:24:06,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742021_1197 (size=156) 2024-12-07T00:24:06,905 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:24:06,905 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 91bed0da05e8fe23662227b048d2ad99}, {pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d}] 2024-12-07T00:24:06,906 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:06,906 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:06,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 
2024-12-07T00:24:07,058 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:07,058 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:07,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=90 2024-12-07T00:24:07,059 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=91 2024-12-07T00:24:07,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:07,059 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:07,059 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2837): Flushing 8b0383ac081a50d6229b6c7be02fcc0d 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-07T00:24:07,059 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2837): Flushing 91bed0da05e8fe23662227b048d2ad99 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-07T00:24:07,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/.tmp/cf/f8f6ce98ec084109b629f1115a30f387 is 71, key is 19314290fa64a1236fc0ef356aca37ff/cf:q/1733531046799/Put/seqid=0 2024-12-07T00:24:07,095 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/.tmp/cf/a6a746feba0d43ab831d6fc9b1102846 is 71, key is 004bd58839c67709f2426c7a412bd00b/cf:q/1733531046796/Put/seqid=0 2024-12-07T00:24:07,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742022_1198 (size=8258) 2024-12-07T00:24:07,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742022_1198 (size=8258) 2024-12-07T00:24:07,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742022_1198 (size=8258) 2024-12-07T00:24:07,100 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), 
to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/.tmp/cf/f8f6ce98ec084109b629f1115a30f387 2024-12-07T00:24:07,110 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/.tmp/cf/f8f6ce98ec084109b629f1115a30f387 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/cf/f8f6ce98ec084109b629f1115a30f387 2024-12-07T00:24:07,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742023_1199 (size=5354) 2024-12-07T00:24:07,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742023_1199 (size=5354) 2024-12-07T00:24:07,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742023_1199 (size=5354) 2024-12-07T00:24:07,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/cf/f8f6ce98ec084109b629f1115a30f387, entries=46, sequenceid=6, filesize=8.1 K 2024-12-07T00:24:07,122 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 8b0383ac081a50d6229b6c7be02fcc0d in 63ms, sequenceid=6, compaction requested=false 2024-12-07T00:24:07,122 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/.tmp/cf/a6a746feba0d43ab831d6fc9b1102846 2024-12-07T00:24:07,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-12-07T00:24:07,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.HRegion(2538): Flush status journal for 8b0383ac081a50d6229b6c7be02fcc0d: 2024-12-07T00:24:07,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. for snaptb0-testConsecutiveExports completed. 2024-12-07T00:24:07,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-07T00:24:07,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:24:07,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/cf/f8f6ce98ec084109b629f1115a30f387] hfiles 2024-12-07T00:24:07,124 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/cf/f8f6ce98ec084109b629f1115a30f387 for snapshot=snaptb0-testConsecutiveExports 2024-12-07T00:24:07,130 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/.tmp/cf/a6a746feba0d43ab831d6fc9b1102846 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/cf/a6a746feba0d43ab831d6fc9b1102846 2024-12-07T00:24:07,136 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/cf/a6a746feba0d43ab831d6fc9b1102846, entries=4, sequenceid=6, filesize=5.2 K 2024-12-07T00:24:07,144 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 91bed0da05e8fe23662227b048d2ad99 in 85ms, sequenceid=6, compaction requested=false 2024-12-07T00:24:07,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.HRegion(2538): Flush status journal for 91bed0da05e8fe23662227b048d2ad99: 2024-12-07T00:24:07,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. for snaptb0-testConsecutiveExports completed. 2024-12-07T00:24:07,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.' 
region-info for snapshot=snaptb0-testConsecutiveExports 2024-12-07T00:24:07,144 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:24:07,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/cf/a6a746feba0d43ab831d6fc9b1102846] hfiles 2024-12-07T00:24:07,145 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/cf/a6a746feba0d43ab831d6fc9b1102846 for snapshot=snaptb0-testConsecutiveExports 2024-12-07T00:24:07,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742024_1200 (size=107) 2024-12-07T00:24:07,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742024_1200 (size=107) 2024-12-07T00:24:07,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742024_1200 (size=107) 2024-12-07T00:24:07,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 
2024-12-07T00:24:07,163 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=91}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=91 2024-12-07T00:24:07,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T00:24:07,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=91 2024-12-07T00:24:07,163 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:07,164 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=91, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:07,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=91, ppid=89, state=SUCCESS; SnapshotRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d in 259 msec 2024-12-07T00:24:07,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742025_1201 (size=107) 2024-12-07T00:24:07,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742025_1201 (size=107) 2024-12-07T00:24:07,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742025_1201 (size=107) 2024-12-07T00:24:07,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 
2024-12-07T00:24:07,193 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=90}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=90 2024-12-07T00:24:07,193 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=90 2024-12-07T00:24:07,194 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:07,194 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=90, ppid=89, state=RUNNABLE; SnapshotRegionProcedure 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:07,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=90, resume processing ppid=89 2024-12-07T00:24:07,197 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=90, ppid=89, state=SUCCESS; SnapshotRegionProcedure 91bed0da05e8fe23662227b048d2ad99 in 290 msec 2024-12-07T00:24:07,197 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:24:07,198 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:24:07,199 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:24:07,199 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-12-07T00:24:07,200 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T00:24:07,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742026_1202 (size=621) 2024-12-07T00:24:07,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742026_1202 (size=621) 2024-12-07T00:24:07,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742026_1202 (size=621) 2024-12-07T00:24:07,237 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:24:07,243 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:24:07,243 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-07T00:24:07,245 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=89, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:24:07,245 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 89 2024-12-07T00:24:07,246 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=89, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=89, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 388 msec 2024-12-07T00:24:07,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=89 2024-12-07T00:24:07,465 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports, procId: 89 completed 2024-12-07T00:24:07,465 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465 2024-12-07T00:24:07,465 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:07,499 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:07,499 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@3c2334f, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T00:24:07,501 INFO [Time-limited 
test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:24:07,509 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T00:24:07,550 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:07,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:07,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:07,551 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,609 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-17676233936394912658.jar 2024-12-07T00:24:08,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,610 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,680 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-11742613361140014210.jar 2024-12-07T00:24:08,681 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,681 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,681 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:08,682 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:24:08,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:24:08,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:24:08,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:24:08,683 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:24:08,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:24:08,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:24:08,684 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:24:08,685 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:24:08,685 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:24:08,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:24:08,686 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:24:08,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:24:08,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:24:08,687 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:24:08,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:24:08,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:24:08,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:24:08,688 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:24:08,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742027_1203 (size=127628) 2024-12-07T00:24:08,749 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742027_1203 (size=127628) 2024-12-07T00:24:08,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742027_1203 (size=127628) 2024-12-07T00:24:08,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742028_1204 (size=2172101) 2024-12-07T00:24:08,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742028_1204 (size=2172101) 2024-12-07T00:24:08,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742028_1204 (size=2172101) 2024-12-07T00:24:08,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742029_1205 (size=213228) 2024-12-07T00:24:08,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742029_1205 (size=213228) 2024-12-07T00:24:08,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742029_1205 (size=213228) 2024-12-07T00:24:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742030_1206 (size=1877034) 2024-12-07T00:24:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742030_1206 (size=1877034) 2024-12-07T00:24:08,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742030_1206 (size=1877034) 2024-12-07T00:24:09,221 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0003_000001 (auth:SIMPLE) from 127.0.0.1:50638 2024-12-07T00:24:09,228 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742031_1207 (size=533455) 2024-12-07T00:24:09,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742031_1207 (size=533455) 2024-12-07T00:24:09,229 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742031_1207 (size=533455) 2024-12-07T00:24:09,233 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0003/container_1733530972728_0003_01_000001/launch_container.sh] 2024-12-07T00:24:09,234 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0003/container_1733530972728_0003_01_000001/container_tokens] 2024-12-07T00:24:09,234 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0003/container_1733530972728_0003_01_000001/sysfs] 2024-12-07T00:24:09,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742032_1208 (size=7280644) 2024-12-07T00:24:09,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742032_1208 (size=7280644) 2024-12-07T00:24:09,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742032_1208 (size=7280644) 2024-12-07T00:24:09,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742033_1209 (size=6350154) 2024-12-07T00:24:09,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742033_1209 (size=6350154) 2024-12-07T00:24:09,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742033_1209 (size=6350154) 2024-12-07T00:24:09,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742034_1210 (size=4188619) 2024-12-07T00:24:09,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742034_1210 (size=4188619) 2024-12-07T00:24:09,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742034_1210 (size=4188619) 2024-12-07T00:24:09,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742035_1211 (size=20406) 2024-12-07T00:24:09,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742035_1211 (size=20406) 2024-12-07T00:24:09,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742035_1211 (size=20406) 2024-12-07T00:24:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742036_1212 (size=75495) 2024-12-07T00:24:09,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742036_1212 (size=75495) 2024-12-07T00:24:09,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742036_1212 (size=75495) 
2024-12-07T00:24:09,769 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742037_1213 (size=45609) 2024-12-07T00:24:09,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742037_1213 (size=45609) 2024-12-07T00:24:09,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742037_1213 (size=45609) 2024-12-07T00:24:09,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742038_1214 (size=110084) 2024-12-07T00:24:09,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742038_1214 (size=110084) 2024-12-07T00:24:09,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742038_1214 (size=110084) 2024-12-07T00:24:09,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742039_1215 (size=1323991) 2024-12-07T00:24:09,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742039_1215 (size=1323991) 2024-12-07T00:24:09,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742039_1215 (size=1323991) 2024-12-07T00:24:09,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742040_1216 (size=23076) 2024-12-07T00:24:09,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742040_1216 (size=23076) 2024-12-07T00:24:09,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742040_1216 (size=23076) 2024-12-07T00:24:09,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742041_1217 (size=126803) 2024-12-07T00:24:09,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742041_1217 (size=126803) 2024-12-07T00:24:09,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742041_1217 (size=126803) 2024-12-07T00:24:09,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742042_1218 (size=322274) 2024-12-07T00:24:09,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742042_1218 (size=322274) 2024-12-07T00:24:09,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742042_1218 (size=322274) 2024-12-07T00:24:09,913 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742043_1219 (size=1832290) 2024-12-07T00:24:09,914 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742043_1219 
(size=1832290) 2024-12-07T00:24:09,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742043_1219 (size=1832290) 2024-12-07T00:24:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742044_1220 (size=30081) 2024-12-07T00:24:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742044_1220 (size=30081) 2024-12-07T00:24:09,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742044_1220 (size=30081) 2024-12-07T00:24:09,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742045_1221 (size=53616) 2024-12-07T00:24:09,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742045_1221 (size=53616) 2024-12-07T00:24:09,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742045_1221 (size=53616) 2024-12-07T00:24:09,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742046_1222 (size=29229) 2024-12-07T00:24:09,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742046_1222 (size=29229) 2024-12-07T00:24:09,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742046_1222 (size=29229) 2024-12-07T00:24:09,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742047_1223 (size=169089) 2024-12-07T00:24:09,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742047_1223 (size=169089) 2024-12-07T00:24:09,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742047_1223 (size=169089) 2024-12-07T00:24:09,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742048_1224 (size=5175431) 2024-12-07T00:24:09,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742048_1224 (size=5175431) 2024-12-07T00:24:10,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742048_1224 (size=5175431) 2024-12-07T00:24:10,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742049_1225 (size=136454) 2024-12-07T00:24:10,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742049_1225 (size=136454) 2024-12-07T00:24:10,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742049_1225 (size=136454) 2024-12-07T00:24:10,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to 
blk_1073742050_1226 (size=451756) 2024-12-07T00:24:10,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742050_1226 (size=451756) 2024-12-07T00:24:10,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742050_1226 (size=451756) 2024-12-07T00:24:10,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742051_1227 (size=907851) 2024-12-07T00:24:10,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742051_1227 (size=907851) 2024-12-07T00:24:10,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742051_1227 (size=907851) 2024-12-07T00:24:10,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742052_1228 (size=3317408) 2024-12-07T00:24:10,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742052_1228 (size=3317408) 2024-12-07T00:24:10,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742052_1228 (size=3317408) 2024-12-07T00:24:10,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742053_1229 (size=503880) 2024-12-07T00:24:10,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742053_1229 (size=503880) 2024-12-07T00:24:10,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742053_1229 (size=503880) 2024-12-07T00:24:10,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742054_1230 (size=4695811) 2024-12-07T00:24:10,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742054_1230 (size=4695811) 2024-12-07T00:24:10,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742054_1230 (size=4695811) 2024-12-07T00:24:10,116 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
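Editor's note: the last entry above is JobResourceUploader's "No job jar file set" warning. For ExportSnapshot this is expected, since the tool ships its dependencies via tmpjars rather than a user job jar, but in a hand-written MapReduce driver the usual remedy is to set the job jar explicitly. Below is a minimal sketch of that remedy under assumed names (the driver and mapper classes are illustrative, not part of this test):

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;

// Minimal driver sketch showing the standard fix for the
// "No job jar file set. User classes may not be found." warning:
// setJarByClass() tells the framework which jar to ship to the cluster.
public class JobJarExample {

  // Identity-style mapper, only here so the example is self-contained.
  public static class PassThroughMapper
      extends Mapper<LongWritable, Text, LongWritable, Text> {
    @Override
    protected void map(LongWritable key, Text value, Context context)
        throws IOException, InterruptedException {
      context.write(key, value);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "jar-example");
    job.setJarByClass(JobJarExample.class);   // avoids the JobResourceUploader warning
    job.setMapperClass(PassThroughMapper.class);
    job.setOutputKeyClass(LongWritable.class);
    job.setOutputValueClass(Text.class);
    // input/output paths would be configured here before job.waitForCompletion(true)
  }
}
```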
2024-12-07T00:24:10,119 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-07T00:24:10,121 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T00:24:10,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742055_1231 (size=338) 2024-12-07T00:24:10,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742055_1231 (size=338) 2024-12-07T00:24:10,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742055_1231 (size=338) 2024-12-07T00:24:10,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742056_1232 (size=15) 2024-12-07T00:24:10,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742056_1232 (size=15) 2024-12-07T00:24:10,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742056_1232 (size=15) 2024-12-07T00:24:10,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742057_1233 (size=304934) 2024-12-07T00:24:10,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742057_1233 (size=304934) 2024-12-07T00:24:10,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742057_1233 (size=304934) 2024-12-07T00:24:10,241 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:24:10,241 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:24:10,849 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:24:10,998 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0004_000001 (auth:SIMPLE) from 127.0.0.1:47856 2024-12-07T00:24:12,673 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
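Editor's note: the two AbstractLeafQueue warnings a few entries above are emitted by the YARN capacity scheduler when yarn.scheduler.capacity.maximum-am-resource-percent is too small to admit even one ApplicationMaster; the MiniMRCluster simply skips enforcement, but on a real cluster the limit can be raised. A minimal sketch follows, setting the property programmatically for illustration only (it would normally be edited in capacity-scheduler.xml, and the 0.5 value is an arbitrary example, not taken from this test):

```java
import org.apache.hadoop.conf.Configuration;

// Illustrative only: raises the share of queue resources that ApplicationMasters
// may consume, the knob behind the AbstractLeafQueue warnings above.
public class AmResourcePercentExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    // 0.1 is the usual default fallback for this property.
    System.out.println("maximum-am-resource-percent = "
        + conf.getFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.1f));
  }
}
```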
2024-12-07T00:24:14,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-07T00:24:14,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-12-07T00:24:14,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-12-07T00:24:17,456 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0004_000001 (auth:SIMPLE) from 127.0.0.1:42388 2024-12-07T00:24:17,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742058_1234 (size=350608) 2024-12-07T00:24:17,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742058_1234 (size=350608) 2024-12-07T00:24:17,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742058_1234 (size=350608) 2024-12-07T00:24:19,796 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0004_000001 (auth:SIMPLE) from 127.0.0.1:55772 2024-12-07T00:24:19,881 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:24:24,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742059_1235 (size=17447) 2024-12-07T00:24:24,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742059_1235 (size=17447) 2024-12-07T00:24:24,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742059_1235 (size=17447) 2024-12-07T00:24:24,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742060_1236 (size=462) 2024-12-07T00:24:24,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742060_1236 (size=462) 2024-12-07T00:24:24,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742060_1236 (size=462) 2024-12-07T00:24:24,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742061_1237 (size=17447) 2024-12-07T00:24:24,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742061_1237 (size=17447) 2024-12-07T00:24:24,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742061_1237 (size=17447) 2024-12-07T00:24:24,167 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0004/container_1733530972728_0004_01_000002/launch_container.sh] 2024-12-07T00:24:24,167 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0004/container_1733530972728_0004_01_000002/container_tokens] 2024-12-07T00:24:24,167 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0004/container_1733530972728_0004_01_000002/sysfs] 2024-12-07T00:24:24,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742062_1238 (size=350608) 2024-12-07T00:24:24,185 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742062_1238 (size=350608) 2024-12-07T00:24:24,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742062_1238 (size=350608) 2024-12-07T00:24:24,203 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0004_000001 (auth:SIMPLE) from 127.0.0.1:33816 2024-12-07T00:24:25,414 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:24:25,414 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
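Editor's note: the entries around this point trace one complete ExportSnapshot run: verify the source snapshot, copy its manifest, stage dependency jars, run the MapReduce copy, then finalize and verify the exported copy. The sketch below shows how such an export can be driven programmatically, assuming it is invoked through ToolRunner the way the test harness does; the snapshot name matches the log, but the destination path and exact flags are placeholders based on the tool's documented -snapshot / -copy-to options:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

// Sketch of driving an export like the one recorded here: copy the snapshot
// 'snaptb0-testConsecutiveExports' from the source HBase root to a local
// filesystem directory. The target URI below is a placeholder.
public class ExportSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    int exitCode = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export"   // placeholder target directory
    });
    System.exit(exitCode);
  }
}
```

The equivalent command-line form documented for the tool is running the ExportSnapshot class through the hbase launcher with the same -snapshot and -copy-to arguments.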
2024-12-07T00:24:25,418 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-07T00:24:25,418 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:24:25,418 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:24:25,418 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-07T00:24:25,420 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-07T00:24:25,420 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-07T00:24:25,420 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@3c2334f in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-07T00:24:25,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-07T00:24:25,421 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-07T00:24:25,423 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:25,464 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:25,464 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@3c2334f, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T00:24:25,467 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:24:25,472 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-12-07T00:24:25,490 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:25,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:25,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:25,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,748 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-3459813475538180493.jar 2024-12-07T00:24:26,749 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,750 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-12855136394434836656.jar 2024-12-07T00:24:26,851 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,852 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T00:24:26,853 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:24:26,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:24:26,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:24:26,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:24:26,854 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:24:26,855 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:24:26,855 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:24:26,855 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:24:26,855 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:24:26,856 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:24:26,856 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:24:26,856 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:24:26,857 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:24:26,857 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:24:26,857 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:24:26,858 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:24:26,858 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:24:26,858 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:24:26,859 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:24:26,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742063_1239 (size=127628) 2024-12-07T00:24:26,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742063_1239 (size=127628) 2024-12-07T00:24:26,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742063_1239 (size=127628) 2024-12-07T00:24:27,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742064_1240 (size=2172101) 2024-12-07T00:24:27,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742064_1240 (size=2172101) 2024-12-07T00:24:27,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742064_1240 (size=2172101) 2024-12-07T00:24:27,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742065_1241 (size=213228) 2024-12-07T00:24:27,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742065_1241 (size=213228) 2024-12-07T00:24:27,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742065_1241 (size=213228) 2024-12-07T00:24:27,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742066_1242 (size=1877034) 2024-12-07T00:24:27,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742066_1242 (size=1877034) 2024-12-07T00:24:27,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742066_1242 (size=1877034) 2024-12-07T00:24:27,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742067_1243 (size=533455) 2024-12-07T00:24:27,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742067_1243 (size=533455) 2024-12-07T00:24:27,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742067_1243 (size=533455) 2024-12-07T00:24:27,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742068_1244 (size=7280644) 2024-12-07T00:24:27,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742068_1244 (size=7280644) 2024-12-07T00:24:27,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742068_1244 (size=7280644) 2024-12-07T00:24:27,253 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742069_1245 (size=6350154) 2024-12-07T00:24:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742069_1245 (size=6350154) 2024-12-07T00:24:27,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742069_1245 (size=6350154) 2024-12-07T00:24:27,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742070_1246 (size=451756) 2024-12-07T00:24:27,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742070_1246 (size=451756) 2024-12-07T00:24:27,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742070_1246 (size=451756) 2024-12-07T00:24:27,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742071_1247 (size=4188619) 2024-12-07T00:24:27,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742071_1247 (size=4188619) 2024-12-07T00:24:27,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742071_1247 (size=4188619) 2024-12-07T00:24:27,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742072_1248 (size=20406) 2024-12-07T00:24:27,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742072_1248 (size=20406) 2024-12-07T00:24:27,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742072_1248 (size=20406) 2024-12-07T00:24:27,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742073_1249 (size=75495) 2024-12-07T00:24:27,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742073_1249 (size=75495) 2024-12-07T00:24:27,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742073_1249 (size=75495) 2024-12-07T00:24:27,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742074_1250 (size=45609) 2024-12-07T00:24:27,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742074_1250 (size=45609) 2024-12-07T00:24:27,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742074_1250 (size=45609) 2024-12-07T00:24:27,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742075_1251 (size=110084) 2024-12-07T00:24:27,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742075_1251 (size=110084) 2024-12-07T00:24:27,405 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742075_1251 (size=110084) 2024-12-07T00:24:27,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742076_1252 (size=1323991) 2024-12-07T00:24:27,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742076_1252 (size=1323991) 2024-12-07T00:24:27,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742076_1252 (size=1323991) 2024-12-07T00:24:27,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742077_1253 (size=23076) 2024-12-07T00:24:27,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742077_1253 (size=23076) 2024-12-07T00:24:27,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742077_1253 (size=23076) 2024-12-07T00:24:27,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742078_1254 (size=126803) 2024-12-07T00:24:27,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742078_1254 (size=126803) 2024-12-07T00:24:27,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742078_1254 (size=126803) 2024-12-07T00:24:27,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742079_1255 (size=322274) 2024-12-07T00:24:27,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742079_1255 (size=322274) 2024-12-07T00:24:27,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742079_1255 (size=322274) 2024-12-07T00:24:27,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742080_1256 (size=1832290) 2024-12-07T00:24:27,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742080_1256 (size=1832290) 2024-12-07T00:24:27,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742080_1256 (size=1832290) 2024-12-07T00:24:27,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742081_1257 (size=30081) 2024-12-07T00:24:27,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742081_1257 (size=30081) 2024-12-07T00:24:27,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742081_1257 (size=30081) 2024-12-07T00:24:27,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742082_1258 (size=53616) 2024-12-07T00:24:27,545 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742082_1258 (size=53616) 2024-12-07T00:24:27,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742082_1258 (size=53616) 2024-12-07T00:24:27,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742083_1259 (size=29229) 2024-12-07T00:24:27,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742083_1259 (size=29229) 2024-12-07T00:24:27,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742083_1259 (size=29229) 2024-12-07T00:24:27,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742084_1260 (size=169089) 2024-12-07T00:24:27,606 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742084_1260 (size=169089) 2024-12-07T00:24:27,607 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742084_1260 (size=169089) 2024-12-07T00:24:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742085_1261 (size=5175431) 2024-12-07T00:24:27,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742085_1261 (size=5175431) 2024-12-07T00:24:27,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742085_1261 (size=5175431) 2024-12-07T00:24:27,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742086_1262 (size=136454) 2024-12-07T00:24:27,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742086_1262 (size=136454) 2024-12-07T00:24:27,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742086_1262 (size=136454) 2024-12-07T00:24:28,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742087_1263 (size=907851) 2024-12-07T00:24:28,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742087_1263 (size=907851) 2024-12-07T00:24:28,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742087_1263 (size=907851) 2024-12-07T00:24:28,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742088_1264 (size=3317408) 2024-12-07T00:24:28,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742088_1264 (size=3317408) 2024-12-07T00:24:28,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742088_1264 (size=3317408) 2024-12-07T00:24:28,183 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742089_1265 (size=503880) 2024-12-07T00:24:28,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742089_1265 (size=503880) 2024-12-07T00:24:28,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742089_1265 (size=503880) 2024-12-07T00:24:28,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742090_1266 (size=4695811) 2024-12-07T00:24:28,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742090_1266 (size=4695811) 2024-12-07T00:24:28,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742090_1266 (size=4695811) 2024-12-07T00:24:28,213 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-07T00:24:28,216 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-12-07T00:24:28,218 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T00:24:28,225 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742091_1267 (size=338) 2024-12-07T00:24:28,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742091_1267 (size=338) 2024-12-07T00:24:28,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742091_1267 (size=338) 2024-12-07T00:24:28,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742092_1268 (size=15) 2024-12-07T00:24:28,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742092_1268 (size=15) 2024-12-07T00:24:28,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742092_1268 (size=15) 2024-12-07T00:24:28,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742093_1269 (size=304932) 2024-12-07T00:24:28,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742093_1269 (size=304932) 2024-12-07T00:24:28,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742093_1269 (size=304932) 2024-12-07T00:24:30,273 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T00:24:30,273 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:24:30,276 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0004_000001 (auth:SIMPLE) from 127.0.0.1:33822 2024-12-07T00:24:30,295 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0004/container_1733530972728_0004_01_000001/launch_container.sh] 2024-12-07T00:24:30,296 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0004/container_1733530972728_0004_01_000001/container_tokens] 2024-12-07T00:24:30,296 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_0/usercache/jenkins/appcache/application_1733530972728_0004/container_1733530972728_0004_01_000001/sysfs] 2024-12-07T00:24:31,222 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0005_000001 (auth:SIMPLE) from 127.0.0.1:57582 2024-12-07T00:24:37,293 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0005_000001 (auth:SIMPLE) from 127.0.0.1:33210 2024-12-07T00:24:37,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742094_1270 (size=350606) 2024-12-07T00:24:37,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742094_1270 (size=350606) 2024-12-07T00:24:37,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742094_1270 (size=350606) 2024-12-07T00:24:39,633 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0005_000001 (auth:SIMPLE) from 127.0.0.1:50582 2024-12-07T00:24:42,674 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T00:24:44,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742095_1271 (size=16925) 2024-12-07T00:24:44,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742095_1271 (size=16925) 2024-12-07T00:24:44,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742095_1271 (size=16925) 2024-12-07T00:24:44,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742096_1272 (size=462) 2024-12-07T00:24:44,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742096_1272 (size=462) 2024-12-07T00:24:44,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742096_1272 (size=462) 2024-12-07T00:24:44,071 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733530972728_0005/container_1733530972728_0005_01_000002/launch_container.sh] 2024-12-07T00:24:44,071 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733530972728_0005/container_1733530972728_0005_01_000002/container_tokens] 2024-12-07T00:24:44,071 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_0/usercache/jenkins/appcache/application_1733530972728_0005/container_1733530972728_0005_01_000002/sysfs] 2024-12-07T00:24:44,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742097_1273 (size=16925) 2024-12-07T00:24:44,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742097_1273 (size=16925) 2024-12-07T00:24:44,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742097_1273 (size=16925) 2024-12-07T00:24:44,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742098_1274 (size=350606) 2024-12-07T00:24:44,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742098_1274 (size=350606) 2024-12-07T00:24:44,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742098_1274 (size=350606) 2024-12-07T00:24:44,106 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0005_000001 (auth:SIMPLE) from 127.0.0.1:46196 2024-12-07T00:24:45,625 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:24:45,625 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T00:24:45,641 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testConsecutiveExports 2024-12-07T00:24:45,641 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:24:45,641 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:24:45,641 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-07T00:24:45,642 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-07T00:24:45,642 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-07T00:24:45,642 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in org.apache.hadoop.fs.LocalFileSystem@3c2334f in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-12-07T00:24:45,643 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-12-07T00:24:45,643 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531047465/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-12-07T00:24:45,667 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testConsecutiveExports 2024-12-07T00:24:45,667 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-12-07T00:24:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=92, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure 
table=testtb-testConsecutiveExports 2024-12-07T00:24:45,671 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531085671"}]},"ts":"1733531085671"} 2024-12-07T00:24:45,673 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-12-07T00:24:45,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-07T00:24:45,675 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-12-07T00:24:45,676 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-12-07T00:24:45,678 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91bed0da05e8fe23662227b048d2ad99, UNASSIGN}, {pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8b0383ac081a50d6229b6c7be02fcc0d, UNASSIGN}] 2024-12-07T00:24:45,679 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=95, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8b0383ac081a50d6229b6c7be02fcc0d, UNASSIGN 2024-12-07T00:24:45,679 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=94, ppid=93, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91bed0da05e8fe23662227b048d2ad99, UNASSIGN 2024-12-07T00:24:45,680 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=8b0383ac081a50d6229b6c7be02fcc0d, regionState=CLOSING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:45,680 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=91bed0da05e8fe23662227b048d2ad99, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:45,682 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:24:45,682 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=96, ppid=95, state=RUNNABLE; CloseRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:24:45,683 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:24:45,683 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=97, ppid=94, state=RUNNABLE; CloseRegionProcedure 91bed0da05e8fe23662227b048d2ad99, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:24:45,775 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-07T00:24:45,834 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 
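The entries a little above, ending with "Export Completed: snaptb0-testConsecutiveExports" and the listing of .snapshotinfo/data.manifest under local-export-1733531047465, record the second of the two consecutive exports, this one to a LocalFileSystem destination. A minimal sketch of how such an export is typically driven is below; it assumes ExportSnapshot can be run through Hadoop's ToolRunner as a standard Tool, and the destination path is illustrative, not the test's exact directory.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy the completed snapshot out of the source cluster; the MapReduce job
    // launched here is what produces the YARN container entries seen above.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/local-export",   // hypothetical destination
        "-overwrite"
    });
    if (rc != 0) {
      throw new IllegalStateException("ExportSnapshot failed with exit code " + rc);
    }
  }
}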
2024-12-07T00:24:45,835 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(124): Close 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:45,835 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:24:45,835 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:45,835 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1681): Closing 8b0383ac081a50d6229b6c7be02fcc0d, disabling compactions & flushes 2024-12-07T00:24:45,835 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:45,835 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:45,835 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. after waiting 0 ms 2024-12-07T00:24:45,835 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:45,836 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(124): Close 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:45,836 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:24:45,836 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1681): Closing 91bed0da05e8fe23662227b048d2ad99, disabling compactions & flushes 2024-12-07T00:24:45,836 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1703): Closing region testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:45,836 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:45,836 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1791): Acquired close lock on testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. after waiting 0 ms 2024-12-07T00:24:45,836 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1801): Updates disabled for region testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 
2024-12-07T00:24:45,851 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:24:45,851 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:24:45,852 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:24:45,852 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:24:45,852 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99. 2024-12-07T00:24:45,852 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1922): Closed testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d. 2024-12-07T00:24:45,852 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] regionserver.HRegion(1635): Region close journal for 91bed0da05e8fe23662227b048d2ad99: 2024-12-07T00:24:45,852 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] regionserver.HRegion(1635): Region close journal for 8b0383ac081a50d6229b6c7be02fcc0d: 2024-12-07T00:24:45,854 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=97}] handler.UnassignRegionHandler(170): Closed 91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:45,855 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=94 updating hbase:meta row=91bed0da05e8fe23662227b048d2ad99, regionState=CLOSED 2024-12-07T00:24:45,855 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=96}] handler.UnassignRegionHandler(170): Closed 8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:45,856 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=95 updating hbase:meta row=8b0383ac081a50d6229b6c7be02fcc0d, regionState=CLOSED 2024-12-07T00:24:45,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=97, resume processing ppid=94 2024-12-07T00:24:45,858 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=97, ppid=94, state=SUCCESS; CloseRegionProcedure 91bed0da05e8fe23662227b048d2ad99, server=6e6711bf2d40,36059,1733530964705 in 173 msec 2024-12-07T00:24:45,859 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=96, resume processing ppid=95 2024-12-07T00:24:45,859 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=96, ppid=95, state=SUCCESS; CloseRegionProcedure 8b0383ac081a50d6229b6c7be02fcc0d, server=6e6711bf2d40,33095,1733530964767 in 175 msec 2024-12-07T00:24:45,860 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1480): Finished pid=94, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=91bed0da05e8fe23662227b048d2ad99, UNASSIGN in 180 msec 2024-12-07T00:24:45,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=95, resume processing ppid=93 2024-12-07T00:24:45,861 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=95, ppid=93, state=SUCCESS; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=8b0383ac081a50d6229b6c7be02fcc0d, UNASSIGN in 181 msec 2024-12-07T00:24:45,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=93, resume processing ppid=92 2024-12-07T00:24:45,863 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=93, ppid=92, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 186 msec 2024-12-07T00:24:45,865 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531085864"}]},"ts":"1733531085864"} 2024-12-07T00:24:45,866 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-12-07T00:24:45,869 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-12-07T00:24:45,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=92, state=SUCCESS; DisableTableProcedure table=testtb-testConsecutiveExports in 202 msec 2024-12-07T00:24:45,976 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=92 2024-12-07T00:24:45,977 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports, procId: 92 completed 2024-12-07T00:24:45,977 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-12-07T00:24:45,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T00:24:45,979 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=98, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T00:24:45,979 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testConsecutiveExports 2024-12-07T00:24:45,980 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=98, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T00:24:45,982 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-12-07T00:24:45,984 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:45,984 
DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:45,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T00:24:45,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T00:24:45,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T00:24:45,985 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T00:24:45,986 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-07T00:24:45,986 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-07T00:24:45,987 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-07T00:24:45,987 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-12-07T00:24:45,987 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/recovered.edits] 2024-12-07T00:24:45,988 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/recovered.edits] 2024-12-07T00:24:45,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T00:24:45,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T00:24:45,988 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T00:24:45,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:45,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-12-07T00:24:45,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:45,988 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:45,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:45,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-07T00:24:45,996 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/cf/a6a746feba0d43ab831d6fc9b1102846 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/cf/a6a746feba0d43ab831d6fc9b1102846 2024-12-07T00:24:45,997 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/cf/f8f6ce98ec084109b629f1115a30f387 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/cf/f8f6ce98ec084109b629f1115a30f387 2024-12-07T00:24:46,000 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99/recovered.edits/9.seqid 2024-12-07T00:24:46,001 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/recovered.edits/9.seqid to 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d/recovered.edits/9.seqid 2024-12-07T00:24:46,001 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/91bed0da05e8fe23662227b048d2ad99 2024-12-07T00:24:46,001 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testConsecutiveExports/8b0383ac081a50d6229b6c7be02fcc0d 2024-12-07T00:24:46,002 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-12-07T00:24:46,005 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=98, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T00:24:46,007 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-12-07T00:24:46,010 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-12-07T00:24:46,011 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=98, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T00:24:46,011 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-12-07T00:24:46,011 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531086011"}]},"ts":"9223372036854775807"} 2024-12-07T00:24:46,011 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531086011"}]},"ts":"9223372036854775807"} 2024-12-07T00:24:46,014 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:24:46,014 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 91bed0da05e8fe23662227b048d2ad99, NAME => 'testtb-testConsecutiveExports,,1733531045758.91bed0da05e8fe23662227b048d2ad99.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8b0383ac081a50d6229b6c7be02fcc0d, NAME => 'testtb-testConsecutiveExports,1,1733531045758.8b0383ac081a50d6229b6c7be02fcc0d.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:24:46,014 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 
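The DisableTableProcedure (pid=92) and DeleteTableProcedure (pid=98) traced above are the server-side counterparts of a straightforward client teardown. A minimal sketch of that client side using only standard Admin API calls follows; the class name and connection setup are illustrative, not the test's code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TeardownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);   // server side: DisableTableProcedure (pid=92 above)
      }
      admin.deleteTable(table);      // server side: DeleteTableProcedure, HFileArchiver archiving
      // The test then removes the snapshots it created, as logged a little further on.
      admin.deleteSnapshot("emptySnaptb0-testConsecutiveExports");
      admin.deleteSnapshot("snaptb0-testConsecutiveExports");
    }
  }
}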
2024-12-07T00:24:46,014 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531086014"}]},"ts":"9223372036854775807"} 2024-12-07T00:24:46,017 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testConsecutiveExports state from META 2024-12-07T00:24:46,024 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=98, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-12-07T00:24:46,025 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=98, state=SUCCESS; DeleteTableProcedure table=testtb-testConsecutiveExports in 47 msec 2024-12-07T00:24:46,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=98 2024-12-07T00:24:46,092 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports, procId: 98 completed 2024-12-07T00:24:46,100 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" 2024-12-07T00:24:46,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-12-07T00:24:46,107 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" 2024-12-07T00:24:46,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testConsecutiveExports 2024-12-07T00:24:46,139 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testConsecutiveExports Thread=781 (was 787), OpenFileDescriptor=782 (was 790), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=736 (was 667) - SystemLoadAverage LEAK? 
-, ProcessCount=17 (was 20), AvailableMemoryMB=3068 (was 3398) 2024-12-07T00:24:46,139 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-12-07T00:24:46,158 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=781, OpenFileDescriptor=782, MaxFileDescriptor=1048576, SystemLoadAverage=736, ProcessCount=18, AvailableMemoryMB=3068 2024-12-07T00:24:46,158 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=781 is superior to 500 2024-12-07T00:24:46,160 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:24:46,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:46,162 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:24:46,162 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:46,162 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 99 2024-12-07T00:24:46,163 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:24:46,164 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T00:24:46,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742099_1275 (size=422) 2024-12-07T00:24:46,178 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742099_1275 (size=422) 2024-12-07T00:24:46,179 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742099_1275 (size=422) 2024-12-07T00:24:46,182 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 1dd98b42eeed044390eae525345b1c17, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', 
METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:46,182 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => a7e461ebe7d31d10a19f22027b1fb153, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:46,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742100_1276 (size=83) 2024-12-07T00:24:46,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742100_1276 (size=83) 2024-12-07T00:24:46,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742100_1276 (size=83) 2024-12-07T00:24:46,195 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742101_1277 (size=83) 2024-12-07T00:24:46,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:46,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742101_1277 (size=83) 2024-12-07T00:24:46,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1681): Closing a7e461ebe7d31d10a19f22027b1fb153, disabling compactions & flushes 2024-12-07T00:24:46,196 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:24:46,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 
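The CreateTableProcedure above (pid=99) builds testtb-testExportFileSystemStateWithMergeRegion with a single 'cf' family and a split point of '1', which is why two regions (1dd98b42... with ENDKEY '1' and a7e461eb... with STARTKEY '1') are initialized. A rough client-side equivalent using the 2.x descriptor builders, mirroring the attributes printed in the log, is sketched below; the helper name and wiring are illustrative, not the test's code.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeRegionTableSketch {
  static void createTable(Admin admin) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
        .setRegionReplication(1)                                   // REGION_REPLICATION => '1'
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1)                                     // VERSIONS => '1'
            .setBlocksize(65536)                                   // BLOCKSIZE => '65536'
            .build())
        .build();
    // One explicit split key ('1') yields the two regions seen in the log:
    // [STARTKEY '', ENDKEY '1'] and [STARTKEY '1', ENDKEY ''].
    admin.createTable(desc, new byte[][] { Bytes.toBytes("1") });
  }
}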
2024-12-07T00:24:46,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742101_1277 (size=83) 2024-12-07T00:24:46,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. after waiting 0 ms 2024-12-07T00:24:46,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:24:46,196 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:24:46,196 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1635): Region close journal for a7e461ebe7d31d10a19f22027b1fb153: 2024-12-07T00:24:46,197 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:46,197 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1681): Closing 1dd98b42eeed044390eae525345b1c17, disabling compactions & flushes 2024-12-07T00:24:46,197 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 2024-12-07T00:24:46,197 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 2024-12-07T00:24:46,197 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. after waiting 0 ms 2024-12-07T00:24:46,197 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 2024-12-07T00:24:46,197 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 
2024-12-07T00:24:46,197 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1635): Region close journal for 1dd98b42eeed044390eae525345b1c17: 2024-12-07T00:24:46,198 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:24:46,198 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733531086198"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531086198"}]},"ts":"1733531086198"} 2024-12-07T00:24:46,198 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1733531086198"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531086198"}]},"ts":"1733531086198"} 2024-12-07T00:24:46,201 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:24:46,202 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:24:46,202 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531086202"}]},"ts":"1733531086202"} 2024-12-07T00:24:46,204 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-12-07T00:24:46,208 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:24:46,209 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:24:46,209 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:24:46,209 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:24:46,209 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:24:46,209 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:24:46,209 INFO [PEWorker-5 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:24:46,209 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:24:46,210 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1dd98b42eeed044390eae525345b1c17, ASSIGN}, {pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a7e461ebe7d31d10a19f22027b1fb153, ASSIGN}] 2024-12-07T00:24:46,211 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): 
Took xlock for pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1dd98b42eeed044390eae525345b1c17, ASSIGN 2024-12-07T00:24:46,211 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a7e461ebe7d31d10a19f22027b1fb153, ASSIGN 2024-12-07T00:24:46,212 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=100, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1dd98b42eeed044390eae525345b1c17, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:24:46,212 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=101, ppid=99, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a7e461ebe7d31d10a19f22027b1fb153, ASSIGN; state=OFFLINE, location=6e6711bf2d40,43281,1733530964846; forceNewPlan=false, retain=false 2024-12-07T00:24:46,265 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T00:24:46,363 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:24:46,363 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=1dd98b42eeed044390eae525345b1c17, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:46,363 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=a7e461ebe7d31d10a19f22027b1fb153, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:24:46,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE; OpenRegionProcedure a7e461ebe7d31d10a19f22027b1fb153, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:24:46,366 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=103, ppid=100, state=RUNNABLE; OpenRegionProcedure 1dd98b42eeed044390eae525345b1c17, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:24:46,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T00:24:46,517 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:24:46,518 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:46,522 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 
2024-12-07T00:24:46,522 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7285): Opening region: {ENCODED => 1dd98b42eeed044390eae525345b1c17, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:24:46,523 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. service=AccessControlService 2024-12-07T00:24:46,523 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:24:46,523 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:46,524 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:46,524 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7327): checking encryption for 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:46,524 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(7330): checking classloading for 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:46,525 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:24:46,525 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7285): Opening region: {ENCODED => a7e461ebe7d31d10a19f22027b1fb153, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:24:46,525 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. service=AccessControlService 2024-12-07T00:24:46,526 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:24:46,526 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:46,526 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:46,526 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7327): checking encryption for a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:46,526 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(7330): checking classloading for a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:46,528 INFO [StoreOpener-a7e461ebe7d31d10a19f22027b1fb153-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:46,528 INFO [StoreOpener-1dd98b42eeed044390eae525345b1c17-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:46,529 INFO [StoreOpener-a7e461ebe7d31d10a19f22027b1fb153-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a7e461ebe7d31d10a19f22027b1fb153 columnFamilyName cf 2024-12-07T00:24:46,529 DEBUG [StoreOpener-a7e461ebe7d31d10a19f22027b1fb153-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:46,530 INFO [StoreOpener-a7e461ebe7d31d10a19f22027b1fb153-1 {}] regionserver.HStore(327): Store=a7e461ebe7d31d10a19f22027b1fb153/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:24:46,531 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:46,531 DEBUG 
[RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:46,532 INFO [StoreOpener-1dd98b42eeed044390eae525345b1c17-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1dd98b42eeed044390eae525345b1c17 columnFamilyName cf 2024-12-07T00:24:46,532 DEBUG [StoreOpener-1dd98b42eeed044390eae525345b1c17-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:46,533 INFO [StoreOpener-1dd98b42eeed044390eae525345b1c17-1 {}] regionserver.HStore(327): Store=1dd98b42eeed044390eae525345b1c17/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:24:46,533 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:46,534 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1085): writing seq id for a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:46,534 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:46,536 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1085): writing seq id for 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:46,536 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:24:46,537 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1102): Opened a7e461ebe7d31d10a19f22027b1fb153; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68998965, jitterRate=0.02816469967365265}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:24:46,538 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegion(1001): Region open journal for a7e461ebe7d31d10a19f22027b1fb153: 2024-12-07T00:24:46,538 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:24:46,538 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153., pid=102, masterSystemTime=1733531086517 2024-12-07T00:24:46,539 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1102): Opened 1dd98b42eeed044390eae525345b1c17; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59632170, jitterRate=-0.11141142249107361}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:24:46,540 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegion(1001): Region open journal for 1dd98b42eeed044390eae525345b1c17: 2024-12-07T00:24:46,540 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:24:46,540 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=102}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:24:46,540 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17., pid=103, masterSystemTime=1733531086518 2024-12-07T00:24:46,541 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=101 updating hbase:meta row=a7e461ebe7d31d10a19f22027b1fb153, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:24:46,545 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 2024-12-07T00:24:46,545 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=103}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 
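
At this point both regions of testtb-testExportFileSystemStateWithMergeRegion have been opened: 1dd98b42eeed044390eae525345b1c17 on 6e6711bf2d40,36059,1733530964705 and a7e461ebe7d31d10a19f22027b1fb153 on 6e6711bf2d40,43281,1733530964846. As a hedged illustration only (not part of the test code; the class name, connection setup, and variable names below are assumptions), a client could observe the same placements through the public RegionLocator API:

// Hedged sketch: read region placements for the table referenced in the log above.
// Only the table name comes from the log; everything else here is illustrative.
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ShowRegionLocations {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Each HRegionLocation pairs a region (encoded name, key range) with its server,
      // mirroring the regionLocation=... updates written to hbase:meta in the log above.
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      for (HRegionLocation loc : locations) {
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
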
2024-12-07T00:24:46,546 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=100 updating hbase:meta row=1dd98b42eeed044390eae525345b1c17, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:46,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=102, resume processing ppid=101 2024-12-07T00:24:46,546 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=102, ppid=101, state=SUCCESS; OpenRegionProcedure a7e461ebe7d31d10a19f22027b1fb153, server=6e6711bf2d40,43281,1733530964846 in 178 msec 2024-12-07T00:24:46,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=101, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a7e461ebe7d31d10a19f22027b1fb153, ASSIGN in 336 msec 2024-12-07T00:24:46,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=103, resume processing ppid=100 2024-12-07T00:24:46,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=103, ppid=100, state=SUCCESS; OpenRegionProcedure 1dd98b42eeed044390eae525345b1c17, server=6e6711bf2d40,36059,1733530964705 in 182 msec 2024-12-07T00:24:46,552 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=100, resume processing ppid=99 2024-12-07T00:24:46,552 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=100, ppid=99, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1dd98b42eeed044390eae525345b1c17, ASSIGN in 340 msec 2024-12-07T00:24:46,553 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:24:46,553 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531086553"}]},"ts":"1733531086553"} 2024-12-07T00:24:46,554 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-12-07T00:24:46,557 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=99, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:24:46,557 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-12-07T00:24:46,559 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-07T00:24:46,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:46,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:46,561 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:46,561 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:46,563 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:46,563 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:46,563 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:46,563 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:46,565 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=99, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 403 msec 2024-12-07T00:24:46,768 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=99 2024-12-07T00:24:46,768 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 99 completed 2024-12-07T00:24:46,768 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportFileSystemStateWithMergeRegion get assigned. Timeout = 60000ms 2024-12-07T00:24:46,769 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:24:46,772 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned to meta. Checking AM states. 2024-12-07T00:24:46,772 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:24:46,772 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithMergeRegion assigned. 
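
The create-table procedure (pid=99) has just finished above, and the harness has confirmed that both regions of testtb-testExportFileSystemStateWithMergeRegion are assigned. As a rough sketch only, assuming a plain client instead of the HBaseTestingUtility helpers the test actually uses (class and variable names are illustrative), an equivalent table creation with a single split point would look roughly like this:

// Hedged sketch: create a two-region table like the one in the log (family "cf",
// split at row key "1"). Not the test's actual code.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMergeRegionTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .build();
      // One split key ("1") yields two regions, [,1) and [1,), matching the
      // STARTKEY/ENDKEY values seen when the regions are opened in the log.
      byte[][] splitKeys = { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
      // createTable waits for the procedure; the master's "procId: 99 completed"
      // line corresponds to this call returning.
    }
  }
}
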
2024-12-07T00:24:46,776 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-07T00:24:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531086776 (current time:1733531086776). 2024-12-07T00:24:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:24:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-07T00:24:46,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:24:46,777 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x3e719800 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@1edc60a6 2024-12-07T00:24:46,783 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5d079874, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:24:46,785 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:46,786 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52796, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:46,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x3e719800 to 127.0.0.1:60354 2024-12-07T00:24:46,787 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:24:46,788 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2c9d8221 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51b91e44 2024-12-07T00:24:46,794 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@309ce83f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:24:46,800 DEBUG [hconnection-0x5baadbbe-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:46,801 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52806, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=ClientService 2024-12-07T00:24:46,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:46,805 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45370, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:46,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2c9d8221 to 127.0.0.1:60354 2024-12-07T00:24:46,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:24:46,808 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-07T00:24:46,809 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:24:46,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-07T00:24:46,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-07T00:24:46,812 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-07T00:24:46,812 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:24:46,813 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:24:46,816 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:24:46,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742102_1278 (size=215) 2024-12-07T00:24:46,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742102_1278 (size=215) 2024-12-07T00:24:46,843 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742102_1278 (size=215) 2024-12-07T00:24:46,844 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:24:46,844 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 1dd98b42eeed044390eae525345b1c17}, {pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure a7e461ebe7d31d10a19f22027b1fb153}] 2024-12-07T00:24:46,845 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:46,845 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:46,892 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-12-07T00:24:46,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-07T00:24:46,996 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:46,996 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:24:46,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-12-07T00:24:46,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:24:46,997 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-12-07T00:24:46,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2538): Flush status journal for a7e461ebe7d31d10a19f22027b1fb153: 2024-12-07T00:24:46,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-07T00:24:46,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:46,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:24:46,998 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:24:47,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 2024-12-07T00:24:47,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2538): Flush status journal for 1dd98b42eeed044390eae525345b1c17: 2024-12-07T00:24:47,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-07T00:24:47,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:24:47,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:24:47,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742103_1279 (size=86) 2024-12-07T00:24:47,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742103_1279 (size=86) 2024-12-07T00:24:47,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742103_1279 (size=86) 2024-12-07T00:24:47,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 
2024-12-07T00:24:47,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-12-07T00:24:47,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=106 2024-12-07T00:24:47,026 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:47,026 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=106, ppid=104, state=RUNNABLE; SnapshotRegionProcedure a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:47,028 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=106, ppid=104, state=SUCCESS; SnapshotRegionProcedure a7e461ebe7d31d10a19f22027b1fb153 in 183 msec 2024-12-07T00:24:47,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742104_1280 (size=86) 2024-12-07T00:24:47,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742104_1280 (size=86) 2024-12-07T00:24:47,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742104_1280 (size=86) 2024-12-07T00:24:47,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 
2024-12-07T00:24:47,037 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-12-07T00:24:47,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=105 2024-12-07T00:24:47,038 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:47,038 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=105, ppid=104, state=RUNNABLE; SnapshotRegionProcedure 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:47,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=105, resume processing ppid=104 2024-12-07T00:24:47,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=105, ppid=104, state=SUCCESS; SnapshotRegionProcedure 1dd98b42eeed044390eae525345b1c17 in 195 msec 2024-12-07T00:24:47,040 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:24:47,041 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:24:47,042 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:24:47,042 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,042 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742105_1281 (size=597) 2024-12-07T00:24:47,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742105_1281 (size=597) 2024-12-07T00:24:47,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742105_1281 (size=597) 2024-12-07T00:24:47,099 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ 
ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:24:47,105 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:24:47,106 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,107 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:24:47,107 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-12-07T00:24:47,109 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=104, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 298 msec 2024-12-07T00:24:47,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=104 2024-12-07T00:24:47,114 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 104 completed 2024-12-07T00:24:47,129 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:24:47,142 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:24:47,157 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,157 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 
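
Above, the FLUSH-type snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion (procId 104) completes against the still-empty table, after which the test writes rows into both regions with the WAL disabled (hence the data-loss warnings). A hedged approximation of those two client-side steps follows; only the snapshot name, table name, and the cf:q column come from the log, while the row keys, values, and loop bounds are invented for illustration:

// Hedged sketch: take a FLUSH-type snapshot, then write rows with the WAL skipped,
// which is what produces the "writing data ... with WAL disabled" warnings above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.SnapshotType;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SnapshotThenLoad {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin();
         Table t = conn.getTable(table)) {
      // FLUSH-type snapshot of the still-empty table (the "emptySnaptb0-..." snapshot).
      admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion", table,
          SnapshotType.FLUSH);
      // Load a few rows without writing the WAL; the region server logs a
      // data-loss warning for such puts, as seen above. Row contents are made up.
      for (int i = 0; i < 50; i++) {
        Put p = new Put(Bytes.toBytes(String.format("%02d-row", i)));
        p.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        p.setDurability(Durability.SKIP_WAL);
        t.put(p);
      }
    }
  }
}
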
2024-12-07T00:24:47,157 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:24:47,203 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-07T00:24:47,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531087203 (current time:1733531087203). 2024-12-07T00:24:47,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:24:47,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-12-07T00:24:47,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:24:47,204 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x553e7d0f to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58443b3d 2024-12-07T00:24:47,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75bc2383, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:24:47,235 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:47,236 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52822, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:47,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x553e7d0f to 127.0.0.1:60354 2024-12-07T00:24:47,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:24:47,239 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x74dc531d to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@db80368 2024-12-07T00:24:47,248 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@190c9967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:24:47,252 DEBUG [hconnection-0x2f446e1e-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:47,253 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): 
Connection from 172.17.0.2:52834, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:47,255 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:24:47,256 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45376, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:24:47,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x74dc531d to 127.0.0.1:60354 2024-12-07T00:24:47,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:24:47,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-12-07T00:24:47,260 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:24:47,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=107, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-12-07T00:24:47,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-07T00:24:47,262 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:24:47,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-07T00:24:47,264 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:24:47,267 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:24:47,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742106_1282 (size=210) 2024-12-07T00:24:47,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742106_1282 (size=210) 
2024-12-07T00:24:47,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742106_1282 (size=210) 2024-12-07T00:24:47,296 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:24:47,296 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 1dd98b42eeed044390eae525345b1c17}, {pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure a7e461ebe7d31d10a19f22027b1fb153}] 2024-12-07T00:24:47,297 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:47,297 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:47,365 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-07T00:24:47,448 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:47,448 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:24:47,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=109 2024-12-07T00:24:47,449 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=108 2024-12-07T00:24:47,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:24:47,449 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 
2024-12-07T00:24:47,450 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2837): Flushing 1dd98b42eeed044390eae525345b1c17 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-07T00:24:47,450 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2837): Flushing a7e461ebe7d31d10a19f22027b1fb153 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-07T00:24:47,479 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/.tmp/cf/3b7a9031410f4c188a69a91cbd2d1989 is 71, key is 0277ac74d5340dd26581d27315537f10/cf:q/1733531087129/Put/seqid=0 2024-12-07T00:24:47,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/.tmp/cf/409924b3fa254592ab7430a5da4779a5 is 71, key is 163e5f9e76b316819bc4537b801d9b20/cf:q/1733531087142/Put/seqid=0 2024-12-07T00:24:47,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742107_1283 (size=5422) 2024-12-07T00:24:47,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742107_1283 (size=5422) 2024-12-07T00:24:47,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742107_1283 (size=5422) 2024-12-07T00:24:47,511 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/.tmp/cf/3b7a9031410f4c188a69a91cbd2d1989 2024-12-07T00:24:47,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742108_1284 (size=8188) 2024-12-07T00:24:47,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742108_1284 (size=8188) 2024-12-07T00:24:47,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742108_1284 (size=8188) 2024-12-07T00:24:47,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/.tmp/cf/3b7a9031410f4c188a69a91cbd2d1989 as 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/cf/3b7a9031410f4c188a69a91cbd2d1989 2024-12-07T00:24:47,557 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/.tmp/cf/409924b3fa254592ab7430a5da4779a5 2024-12-07T00:24:47,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-07T00:24:47,567 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/.tmp/cf/409924b3fa254592ab7430a5da4779a5 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/cf/409924b3fa254592ab7430a5da4779a5 2024-12-07T00:24:47,570 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/cf/3b7a9031410f4c188a69a91cbd2d1989, entries=5, sequenceid=6, filesize=5.3 K 2024-12-07T00:24:47,571 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 1dd98b42eeed044390eae525345b1c17 in 122ms, sequenceid=6, compaction requested=false 2024-12-07T00:24:47,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.HRegion(2538): Flush status journal for 1dd98b42eeed044390eae525345b1c17: 2024-12-07T00:24:47,571 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-07T00:24:47,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:24:47,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/cf/3b7a9031410f4c188a69a91cbd2d1989] hfiles 2024-12-07T00:24:47,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/cf/3b7a9031410f4c188a69a91cbd2d1989 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,577 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/cf/409924b3fa254592ab7430a5da4779a5, entries=45, sequenceid=6, filesize=8.0 K 2024-12-07T00:24:47,578 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for a7e461ebe7d31d10a19f22027b1fb153 in 129ms, sequenceid=6, compaction requested=false 2024-12-07T00:24:47,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.HRegion(2538): Flush status journal for a7e461ebe7d31d10a19f22027b1fb153: 2024-12-07T00:24:47,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-12-07T00:24:47,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:24:47,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/cf/409924b3fa254592ab7430a5da4779a5] hfiles 2024-12-07T00:24:47,578 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/cf/409924b3fa254592ab7430a5da4779a5 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742109_1285 (size=125) 2024-12-07T00:24:47,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742109_1285 (size=125) 2024-12-07T00:24:47,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742109_1285 (size=125) 2024-12-07T00:24:47,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 
2024-12-07T00:24:47,599 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=108}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=108 2024-12-07T00:24:47,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=108 2024-12-07T00:24:47,600 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:47,600 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=108, ppid=107, state=RUNNABLE; SnapshotRegionProcedure 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:24:47,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=108, ppid=107, state=SUCCESS; SnapshotRegionProcedure 1dd98b42eeed044390eae525345b1c17 in 308 msec 2024-12-07T00:24:47,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742110_1286 (size=125) 2024-12-07T00:24:47,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742110_1286 (size=125) 2024-12-07T00:24:47,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742110_1286 (size=125) 2024-12-07T00:24:47,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 
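For context, the entries above and below trace the master-side SnapshotProcedure (pid=107) and its per-region SnapshotRegionProcedure children (pid=108 and pid=109): each region is flushed, references to the resulting hfiles are recorded in the region manifest, and the manifests are later consolidated before the snapshot is moved out of .tmp. A minimal client-side sketch of the Admin call that drives a snapshot like this one (connection setup is assumed and not shown in the log; table and snapshot names are taken from the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the snapshot procedure (the SNAPSHOT_* states in the log) completes;
      // the log shows this snapshot being taken as type=FLUSH on the enabled table.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"));
    }
  }
}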
2024-12-07T00:24:47,611 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=109}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=109 2024-12-07T00:24:47,612 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=109 2024-12-07T00:24:47,612 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:47,612 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=109, ppid=107, state=RUNNABLE; SnapshotRegionProcedure a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:24:47,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=109, resume processing ppid=107 2024-12-07T00:24:47,620 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=109, ppid=107, state=SUCCESS; SnapshotRegionProcedure a7e461ebe7d31d10a19f22027b1fb153 in 317 msec 2024-12-07T00:24:47,620 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:24:47,622 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:24:47,623 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:24:47,623 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,625 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742111_1287 (size=675) 2024-12-07T00:24:47,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742111_1287 (size=675) 2024-12-07T00:24:47,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742111_1287 (size=675) 2024-12-07T00:24:47,660 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion 
table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:24:47,676 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:24:47,677 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:47,678 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=107, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:24:47,679 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 107 2024-12-07T00:24:47,680 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=107, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=107, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 419 msec 2024-12-07T00:24:47,829 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 1dd98b42eeed044390eae525345b1c17 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:24:47,829 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region a7e461ebe7d31d10a19f22027b1fb153 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:24:47,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=107 2024-12-07T00:24:47,868 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 107 completed 2024-12-07T00:24:47,897 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:24:47,922 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45386, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:24:47,925 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33095 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-07T00:24:47,926 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:24:47,932 INFO [RS-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49976, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), 
service=AdminService 2024-12-07T00:24:47,933 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-07T00:24:47,936 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-12-07T00:24:47,949 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52850, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-12-07T00:24:47,950 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-12-07T00:24:47,954 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:24:47,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:24:47,959 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:24:47,959 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:47,960 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 110 2024-12-07T00:24:47,960 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:24:47,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T00:24:48,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742112_1288 (size=399) 2024-12-07T00:24:48,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742112_1288 (size=399) 2024-12-07T00:24:48,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742112_1288 (size=399) 2024-12-07T00:24:48,034 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 6c4dec1bc7503af64d0f67bf7ec5a5f9, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9.', 
STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:48,043 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => fdaba13ef1aa6e8697903f03e0a7fc29, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:24:48,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T00:24:48,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742113_1289 (size=85) 2024-12-07T00:24:48,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742113_1289 (size=85) 2024-12-07T00:24:48,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742113_1289 (size=85) 2024-12-07T00:24:48,108 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:48,108 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1681): Closing 6c4dec1bc7503af64d0f67bf7ec5a5f9, disabling compactions & flushes 2024-12-07T00:24:48,108 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,108 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,108 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 
after waiting 0 ms 2024-12-07T00:24:48,108 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,108 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,108 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1635): Region close journal for 6c4dec1bc7503af64d0f67bf7ec5a5f9: 2024-12-07T00:24:48,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742114_1290 (size=85) 2024-12-07T00:24:48,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742114_1290 (size=85) 2024-12-07T00:24:48,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742114_1290 (size=85) 2024-12-07T00:24:48,123 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:48,124 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1681): Closing fdaba13ef1aa6e8697903f03e0a7fc29, disabling compactions & flushes 2024-12-07T00:24:48,124 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. 2024-12-07T00:24:48,124 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. 2024-12-07T00:24:48,124 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. after waiting 0 ms 2024-12-07T00:24:48,124 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. 2024-12-07T00:24:48,124 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. 
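The CREATE_TABLE_* states above belong to a CreateTableProcedure (pid=110) for testtb-testExportFileSystemStateWithMergeRegion-1 with a single 'cf' family and one split point of '2', which is why two regions are initialized (start keys '' and '2'). A hedged sketch of the client request that produces such a layout; every descriptor setting other than the family name, max versions, and split key is left at its default and is not meant to reproduce the logged descriptor exactly:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  // 'admin' is an open org.apache.hadoop.hbase.client.Admin handle; its setup is assumed.
  static void createMergeTestTable(Admin admin) throws IOException {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    ColumnFamilyDescriptorBuilder cf =
        ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMaxVersions(1); // VERSIONS => '1' in the logged descriptor
    // A single split key of '2' yields the two regions seen in the log: (''..'2') and ('2'..'').
    byte[][] splitKeys = { Bytes.toBytes("2") };
    admin.createTable(TableDescriptorBuilder.newBuilder(table)
        .setColumnFamily(cf.build())
        .build(), splitKeys);
  }
}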
2024-12-07T00:24:48,124 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1635): Region close journal for fdaba13ef1aa6e8697903f03e0a7fc29: 2024-12-07T00:24:48,125 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:24:48,126 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733531088125"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531088125"}]},"ts":"1733531088125"} 2024-12-07T00:24:48,126 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1733531088125"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531088125"}]},"ts":"1733531088125"} 2024-12-07T00:24:48,129 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:24:48,129 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:24:48,130 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531088129"}]},"ts":"1733531088129"} 2024-12-07T00:24:48,131 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-12-07T00:24:48,135 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:24:48,136 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:24:48,136 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:24:48,136 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:24:48,136 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:24:48,136 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:24:48,136 INFO [PEWorker-4 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:24:48,136 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:24:48,137 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6c4dec1bc7503af64d0f67bf7ec5a5f9, ASSIGN}, {pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdaba13ef1aa6e8697903f03e0a7fc29, ASSIGN}] 2024-12-07T00:24:48,138 INFO [PEWorker-2 {}] 
procedure.MasterProcedureScheduler(786): Took xlock for pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdaba13ef1aa6e8697903f03e0a7fc29, ASSIGN 2024-12-07T00:24:48,138 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6c4dec1bc7503af64d0f67bf7ec5a5f9, ASSIGN 2024-12-07T00:24:48,139 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=112, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdaba13ef1aa6e8697903f03e0a7fc29, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:24:48,139 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(264): Starting pid=111, ppid=110, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6c4dec1bc7503af64d0f67bf7ec5a5f9, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:24:48,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T00:24:48,289 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:24:48,290 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=6c4dec1bc7503af64d0f67bf7ec5a5f9, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:48,290 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=fdaba13ef1aa6e8697903f03e0a7fc29, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:48,292 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=113, ppid=112, state=RUNNABLE; OpenRegionProcedure fdaba13ef1aa6e8697903f03e0a7fc29, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:24:48,293 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=114, ppid=111, state=RUNNABLE; OpenRegionProcedure 6c4dec1bc7503af64d0f67bf7ec5a5f9, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:24:48,445 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:48,446 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:48,448 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. 
2024-12-07T00:24:48,449 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7285): Opening region: {ENCODED => fdaba13ef1aa6e8697903f03e0a7fc29, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.', STARTKEY => '2', ENDKEY => ''} 2024-12-07T00:24:48,449 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. service=AccessControlService 2024-12-07T00:24:48,449 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:24:48,449 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:24:48,450 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:48,450 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7327): checking encryption for fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:24:48,450 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(7330): checking classloading for fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:24:48,450 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,450 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7285): Opening region: {ENCODED => 6c4dec1bc7503af64d0f67bf7ec5a5f9, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9.', STARTKEY => '', ENDKEY => '2'} 2024-12-07T00:24:48,450 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. service=AccessControlService 2024-12-07T00:24:48,450 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:24:48,451 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:24:48,451 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:24:48,451 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7327): checking encryption for 6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:24:48,451 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(7330): checking classloading for 6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:24:48,452 INFO [StoreOpener-6c4dec1bc7503af64d0f67bf7ec5a5f9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:24:48,452 INFO [StoreOpener-fdaba13ef1aa6e8697903f03e0a7fc29-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:24:48,454 INFO [StoreOpener-6c4dec1bc7503af64d0f67bf7ec5a5f9-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6c4dec1bc7503af64d0f67bf7ec5a5f9 columnFamilyName cf 2024-12-07T00:24:48,454 DEBUG [StoreOpener-6c4dec1bc7503af64d0f67bf7ec5a5f9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:48,454 INFO [StoreOpener-fdaba13ef1aa6e8697903f03e0a7fc29-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fdaba13ef1aa6e8697903f03e0a7fc29 columnFamilyName cf 2024-12-07T00:24:48,454 DEBUG [StoreOpener-fdaba13ef1aa6e8697903f03e0a7fc29-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:24:48,454 INFO [StoreOpener-6c4dec1bc7503af64d0f67bf7ec5a5f9-1 {}] regionserver.HStore(327): Store=6c4dec1bc7503af64d0f67bf7ec5a5f9/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:24:48,455 INFO [StoreOpener-fdaba13ef1aa6e8697903f03e0a7fc29-1 {}] regionserver.HStore(327): Store=fdaba13ef1aa6e8697903f03e0a7fc29/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:24:48,456 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:24:48,456 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:24:48,457 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:24:48,457 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:24:48,459 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1085): writing seq id for 6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:24:48,459 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1085): writing seq id for fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:24:48,463 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:24:48,464 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1102): Opened 6c4dec1bc7503af64d0f67bf7ec5a5f9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71070558, 
jitterRate=0.0590338408946991}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:24:48,464 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegion(1001): Region open journal for 6c4dec1bc7503af64d0f67bf7ec5a5f9: 2024-12-07T00:24:48,465 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9., pid=114, masterSystemTime=1733531088446 2024-12-07T00:24:48,467 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,468 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=114}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,469 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=111 updating hbase:meta row=6c4dec1bc7503af64d0f67bf7ec5a5f9, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:48,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=114, resume processing ppid=111 2024-12-07T00:24:48,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=114, ppid=111, state=SUCCESS; OpenRegionProcedure 6c4dec1bc7503af64d0f67bf7ec5a5f9, server=6e6711bf2d40,36059,1733530964705 in 177 msec 2024-12-07T00:24:48,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=111, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6c4dec1bc7503af64d0f67bf7ec5a5f9, ASSIGN in 335 msec 2024-12-07T00:24:48,478 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:24:48,479 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1102): Opened fdaba13ef1aa6e8697903f03e0a7fc29; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58735671, jitterRate=-0.12477029860019684}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:24:48,479 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegion(1001): Region open journal for fdaba13ef1aa6e8697903f03e0a7fc29: 2024-12-07T00:24:48,480 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29., pid=113, masterSystemTime=1733531088444 2024-12-07T00:24:48,490 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] regionserver.HRegionServer(2628): Finished post open deploy task for 
testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. 2024-12-07T00:24:48,490 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=113}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. 2024-12-07T00:24:48,491 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=112 updating hbase:meta row=fdaba13ef1aa6e8697903f03e0a7fc29, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:48,499 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=113, resume processing ppid=112 2024-12-07T00:24:48,500 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=113, ppid=112, state=SUCCESS; OpenRegionProcedure fdaba13ef1aa6e8697903f03e0a7fc29, server=6e6711bf2d40,33095,1733530964767 in 203 msec 2024-12-07T00:24:48,502 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=112, resume processing ppid=110 2024-12-07T00:24:48,503 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=112, ppid=110, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdaba13ef1aa6e8697903f03e0a7fc29, ASSIGN in 362 msec 2024-12-07T00:24:48,504 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:24:48,504 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531088504"}]},"ts":"1733531088504"} 2024-12-07T00:24:48,507 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-12-07T00:24:48,510 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=110, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:24:48,510 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-12-07T00:24:48,512 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-12-07T00:24:48,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:48,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:48,514 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:48,514 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:24:48,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:48,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:48,516 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:48,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:48,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:48,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:48,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:48,517 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-12-07T00:24:48,519 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=110, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 563 msec 2024-12-07T00:24:48,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=110 2024-12-07T00:24:48,571 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 110 completed 2024-12-07T00:24:48,600 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$2(2219): Client=jenkins//172.17.0.2 merge regions 
[6c4dec1bc7503af64d0f67bf7ec5a5f9, fdaba13ef1aa6e8697903f03e0a7fc29] 2024-12-07T00:24:48,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[6c4dec1bc7503af64d0f67bf7ec5a5f9, fdaba13ef1aa6e8697903f03e0a7fc29], force=true 2024-12-07T00:24:48,608 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[6c4dec1bc7503af64d0f67bf7ec5a5f9, fdaba13ef1aa6e8697903f03e0a7fc29], force=true 2024-12-07T00:24:48,608 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[6c4dec1bc7503af64d0f67bf7ec5a5f9, fdaba13ef1aa6e8697903f03e0a7fc29], force=true 2024-12-07T00:24:48,608 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=115, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[6c4dec1bc7503af64d0f67bf7ec5a5f9, fdaba13ef1aa6e8697903f03e0a7fc29], force=true 2024-12-07T00:24:48,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-07T00:24:48,626 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6c4dec1bc7503af64d0f67bf7ec5a5f9, UNASSIGN}, {pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdaba13ef1aa6e8697903f03e0a7fc29, UNASSIGN}] 2024-12-07T00:24:48,627 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=116, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6c4dec1bc7503af64d0f67bf7ec5a5f9, UNASSIGN 2024-12-07T00:24:48,628 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=117, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdaba13ef1aa6e8697903f03e0a7fc29, UNASSIGN 2024-12-07T00:24:48,629 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=fdaba13ef1aa6e8697903f03e0a7fc29, regionState=CLOSING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:48,629 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=6c4dec1bc7503af64d0f67bf7ec5a5f9, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:48,630 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-07T00:24:48,632 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=118, ppid=117, state=RUNNABLE; CloseRegionProcedure fdaba13ef1aa6e8697903f03e0a7fc29, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:24:48,634 DEBUG 
[PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-12-07T00:24:48,634 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=119, ppid=116, state=RUNNABLE; CloseRegionProcedure 6c4dec1bc7503af64d0f67bf7ec5a5f9, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:24:48,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115 2024-12-07T00:24:48,790 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:24:48,790 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:24:48,791 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(124): Close 6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:24:48,791 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(124): Close fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(138): Unassign region: split region: true: evictCache: true 2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1681): Closing 6c4dec1bc7503af64d0f67bf7ec5a5f9, disabling compactions & flushes 2024-12-07T00:24:48,791 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1681): Closing fdaba13ef1aa6e8697903f03e0a7fc29, disabling compactions & flushes 2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. after waiting 0 ms 2024-12-07T00:24:48,791 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. 2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9. 
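The 24 B flushes in the entries below, with cells keyed 1/cf: and 2/cf:, indicate that one row was written to each of the two new regions before the merge was requested. A hedged sketch of such writes; the empty qualifier matches the logged keys, while the value bytes are placeholders and are not recoverable from the log:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutSketch {
  // 'conn' is an open HBase Connection; its setup is assumed.
  static void putOneRowPerRegion(Connection conn) throws IOException {
    try (Table t = conn.getTable(
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))) {
      // Row '1' lands in region (''..'2'); row '2' lands in region ('2'..'').
      t.put(new Put(Bytes.toBytes("1"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v1")));
      t.put(new Put(Bytes.toBytes("2"))
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes(""), Bytes.toBytes("v2")));
    }
  }
}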
2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.
2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29. after waiting 0 ms
2024-12-07T00:24:48,791 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.
2024-12-07T00:24:48,791 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(2837): Flushing 6c4dec1bc7503af64d0f67bf7ec5a5f9 1/1 column families, dataSize=24 B heapSize=352 B
2024-12-07T00:24:48,791 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(2837): Flushing fdaba13ef1aa6e8697903f03e0a7fc29 1/1 column families, dataSize=24 B heapSize=352 B
2024-12-07T00:24:48,818 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/.tmp/cf/e8900aeda8b5454ea1cc00ef9ccf9678 is 28, key is 1/cf:/1733531088577/Put/seqid=0
2024-12-07T00:24:48,819 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/.tmp/cf/d2a9235fcf104a3dbab9f9ba3834164b is 28, key is 2/cf:/1733531088584/Put/seqid=0
2024-12-07T00:24:48,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742116_1292 (size=4945)
2024-12-07T00:24:48,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742116_1292 (size=4945)
2024-12-07T00:24:48,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742116_1292 (size=4945)
2024-12-07T00:24:48,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742115_1291 (size=4945)
2024-12-07T00:24:48,848 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/.tmp/cf/d2a9235fcf104a3dbab9f9ba3834164b
2024-12-07T00:24:48,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742115_1291 (size=4945)
2024-12-07T00:24:48,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742115_1291 (size=4945)
2024-12-07T00:24:48,849 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/.tmp/cf/e8900aeda8b5454ea1cc00ef9ccf9678
2024-12-07T00:24:48,857 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/.tmp/cf/e8900aeda8b5454ea1cc00ef9ccf9678 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/cf/e8900aeda8b5454ea1cc00ef9ccf9678
2024-12-07T00:24:48,866 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/cf/e8900aeda8b5454ea1cc00ef9ccf9678, entries=1, sequenceid=5, filesize=4.8 K
2024-12-07T00:24:48,867 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for 6c4dec1bc7503af64d0f67bf7ec5a5f9 in 76ms, sequenceid=5, compaction requested=false
2024-12-07T00:24:48,867 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/.tmp/cf/d2a9235fcf104a3dbab9f9ba3834164b as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/cf/d2a9235fcf104a3dbab9f9ba3834164b
2024-12-07T00:24:48,867 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1'
2024-12-07T00:24:48,875 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/cf/d2a9235fcf104a3dbab9f9ba3834164b, entries=1, sequenceid=5, filesize=4.8 K
2024-12-07T00:24:48,876 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(3040): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for fdaba13ef1aa6e8697903f03e0a7fc29 in 85ms, sequenceid=5, compaction requested=false
2024-12-07T00:24:48,888 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-07T00:24:48,889 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-07T00:24:48,889 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9.
2024-12-07T00:24:48,889 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] regionserver.HRegion(1635): Region close journal for 6c4dec1bc7503af64d0f67bf7ec5a5f9:
2024-12-07T00:24:48,892 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=119}] handler.UnassignRegionHandler(170): Closed 6c4dec1bc7503af64d0f67bf7ec5a5f9
2024-12-07T00:24:48,892 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=116 updating hbase:meta row=6c4dec1bc7503af64d0f67bf7ec5a5f9, regionState=CLOSED
2024-12-07T00:24:48,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=119, resume processing ppid=116
2024-12-07T00:24:48,898 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=119, ppid=116, state=SUCCESS; CloseRegionProcedure 6c4dec1bc7503af64d0f67bf7ec5a5f9, server=6e6711bf2d40,36059,1733530964705 in 260 msec
2024-12-07T00:24:48,900 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=116, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=6c4dec1bc7503af64d0f67bf7ec5a5f9, UNASSIGN in 272 msec
2024-12-07T00:24:48,901 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1
2024-12-07T00:24:48,901 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController
2024-12-07T00:24:48,901 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.
2024-12-07T00:24:48,901 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] regionserver.HRegion(1635): Region close journal for fdaba13ef1aa6e8697903f03e0a7fc29:
2024-12-07T00:24:48,904 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=118}] handler.UnassignRegionHandler(170): Closed fdaba13ef1aa6e8697903f03e0a7fc29
2024-12-07T00:24:48,911 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=117 updating hbase:meta row=fdaba13ef1aa6e8697903f03e0a7fc29, regionState=CLOSED
2024-12-07T00:24:48,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=118, resume processing ppid=117
2024-12-07T00:24:48,915 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=118, ppid=117, state=SUCCESS; CloseRegionProcedure fdaba13ef1aa6e8697903f03e0a7fc29, server=6e6711bf2d40,33095,1733530964767 in 280 msec
2024-12-07T00:24:48,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115
2024-12-07T00:24:48,917 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=117, resume processing ppid=115
2024-12-07T00:24:48,918 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=117, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fdaba13ef1aa6e8697903f03e0a7fc29, UNASSIGN in 289 msec
2024-12-07T00:24:48,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742117_1293 (size=84)
2024-12-07T00:24:48,953 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742117_1293 (size=84)
2024-12-07T00:24:48,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742117_1293 (size=84)
2024-12-07T00:24:48,958 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T00:24:48,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742118_1294 (size=20)
2024-12-07T00:24:48,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742118_1294 (size=20)
2024-12-07T00:24:48,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742118_1294 (size=20)
2024-12-07T00:24:48,996 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T00:24:49,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742119_1295 (size=21)
2024-12-07T00:24:49,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742119_1295 (size=21)
2024-12-07T00:24:49,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742119_1295 (size=21)
2024-12-07T00:24:49,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742120_1296 (size=84)
2024-12-07T00:24:49,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742120_1296 (size=84)
2024-12-07T00:24:49,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742120_1296 (size=84)
2024-12-07T00:24:49,072 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T00:24:49,091 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1
2024-12-07T00:24:49,093 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087953.6c4dec1bc7503af64d0f67bf7ec5a5f9.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"}
2024-12-07T00:24:49,093 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1733531087953.fdaba13ef1aa6e8697903f03e0a7fc29.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"}
2024-12-07T00:24:49,094 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"}
2024-12-07T00:24:49,129 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45558a416e5cf43cb4193d8fc7875e25, ASSIGN}]
2024-12-07T00:24:49,132 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45558a416e5cf43cb4193d8fc7875e25, ASSIGN
2024-12-07T00:24:49,132 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=120, ppid=115, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45558a416e5cf43cb4193d8fc7875e25, ASSIGN; state=MERGED, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false
2024-12-07T00:24:49,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115
2024-12-07T00:24:49,283 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 1 regions. 1 retained the pre-restart assignment.
2024-12-07T00:24:49,283 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=45558a416e5cf43cb4193d8fc7875e25, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705
2024-12-07T00:24:49,285 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=121, ppid=120, state=RUNNABLE; OpenRegionProcedure 45558a416e5cf43cb4193d8fc7875e25, server=6e6711bf2d40,36059,1733530964705}]
2024-12-07T00:24:49,437 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705
2024-12-07T00:24:49,444 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.
2024-12-07T00:24:49,445 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7285): Opening region: {ENCODED => 45558a416e5cf43cb4193d8fc7875e25, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.', STARTKEY => '', ENDKEY => ''}
2024-12-07T00:24:49,445 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25. service=AccessControlService
2024-12-07T00:24:49,445 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911.
2024-12-07T00:24:49,446 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,446 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable
2024-12-07T00:24:49,446 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7327): checking encryption for 45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,446 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(7330): checking classloading for 45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,448 INFO [StoreOpener-45558a416e5cf43cb4193d8fc7875e25-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,449 INFO [StoreOpener-45558a416e5cf43cb4193d8fc7875e25-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 45558a416e5cf43cb4193d8fc7875e25 columnFamilyName cf
2024-12-07T00:24:49,449 DEBUG [StoreOpener-45558a416e5cf43cb4193d8fc7875e25-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker
2024-12-07T00:24:49,477 DEBUG [StoreOpener-45558a416e5cf43cb4193d8fc7875e25-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/d2a9235fcf104a3dbab9f9ba3834164b.fdaba13ef1aa6e8697903f03e0a7fc29->hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/cf/d2a9235fcf104a3dbab9f9ba3834164b-top
2024-12-07T00:24:49,484 DEBUG [StoreOpener-45558a416e5cf43cb4193d8fc7875e25-1 {}] regionserver.StoreEngine(277): loaded hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/e8900aeda8b5454ea1cc00ef9ccf9678.6c4dec1bc7503af64d0f67bf7ec5a5f9->hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/cf/e8900aeda8b5454ea1cc00ef9ccf9678-top
2024-12-07T00:24:49,485 INFO [StoreOpener-45558a416e5cf43cb4193d8fc7875e25-1 {}] regionserver.HStore(327): Store=45558a416e5cf43cb4193d8fc7875e25/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE
2024-12-07T00:24:49,486 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,488 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,490 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1085): writing seq id for 45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,491 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1102): Opened 45558a416e5cf43cb4193d8fc7875e25; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64903065, jitterRate=-0.032868966460227966}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1}
2024-12-07T00:24:49,492 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegion(1001): Region open journal for 45558a416e5cf43cb4193d8fc7875e25:
2024-12-07T00:24:49,493 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25., pid=121, masterSystemTime=1733531089437
2024-12-07T00:24:49,494 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.,because compaction is disabled.
2024-12-07T00:24:49,497 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.
2024-12-07T00:24:49,497 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=120 updating hbase:meta row=45558a416e5cf43cb4193d8fc7875e25, regionState=OPEN, openSeqNum=9, regionLocation=6e6711bf2d40,36059,1733530964705
2024-12-07T00:24:49,498 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=121}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.
2024-12-07T00:24:49,503 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=121, resume processing ppid=120
2024-12-07T00:24:49,504 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=121, ppid=120, state=SUCCESS; OpenRegionProcedure 45558a416e5cf43cb4193d8fc7875e25, server=6e6711bf2d40,36059,1733530964705 in 216 msec
2024-12-07T00:24:49,506 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=120, resume processing ppid=115
2024-12-07T00:24:49,506 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=120, ppid=115, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45558a416e5cf43cb4193d8fc7875e25, ASSIGN in 375 msec
2024-12-07T00:24:49,517 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=115, state=SUCCESS; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[6c4dec1bc7503af64d0f67bf7ec5a5f9, fdaba13ef1aa6e8697903f03e0a7fc29], force=true in 904 msec
2024-12-07T00:24:49,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=115
2024-12-07T00:24:49,719 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 115 completed
2024-12-07T00:24:49,720 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }
2024-12-07T00:24:49,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531089720 (current time:1733531089720).
2024-12-07T00:24:49,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0
2024-12-07T00:24:49,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2
2024-12-07T00:24:49,720 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot
2024-12-07T00:24:49,722 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x69b561de to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@5a1be506
2024-12-07T00:24:49,725 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61b75e53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-07T00:24:49,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-07T00:24:49,728 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52856, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-07T00:24:49,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x69b561de to 127.0.0.1:60354
2024-12-07T00:24:49,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-07T00:24:49,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0fa85aee to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@a9ebdc8
2024-12-07T00:24:49,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@51003f0e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null
2024-12-07T00:24:49,735 DEBUG [hconnection-0x175f9d04-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-07T00:24:49,737 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52868, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-07T00:24:49,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false
2024-12-07T00:24:49,739 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45396, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService
2024-12-07T00:24:49,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0fa85aee to 127.0.0.1:60354
2024-12-07T00:24:49,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-12-07T00:24:49,740 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA]
2024-12-07T00:24:49,741 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot...
2024-12-07T00:24:49,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }
2024-12-07T00:24:49,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122
2024-12-07T00:24:49,743 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE
2024-12-07T00:24:49,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-07T00:24:49,744 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION
2024-12-07T00:24:49,751 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO
2024-12-07T00:24:49,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742121_1297 (size=216)
2024-12-07T00:24:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742121_1297 (size=216)
2024-12-07T00:24:49,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742121_1297 (size=216)
2024-12-07T00:24:49,766 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS
2024-12-07T00:24:49,766 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 45558a416e5cf43cb4193d8fc7875e25}]
2024-12-07T00:24:49,767 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-07T00:24:49,920 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705
2024-12-07T00:24:49,922 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123
2024-12-07T00:24:49,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.
2024-12-07T00:24:49,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2538): Flush status journal for 45558a416e5cf43cb4193d8fc7875e25:
2024-12-07T00:24:49,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed.
2024-12-07T00:24:49,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-07T00:24:49,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles
2024-12-07T00:24:49,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/d2a9235fcf104a3dbab9f9ba3834164b.fdaba13ef1aa6e8697903f03e0a7fc29->hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/cf/d2a9235fcf104a3dbab9f9ba3834164b-top, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/e8900aeda8b5454ea1cc00ef9ccf9678.6c4dec1bc7503af64d0f67bf7ec5a5f9->hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/cf/e8900aeda8b5454ea1cc00ef9ccf9678-top] hfiles
2024-12-07T00:24:49,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/d2a9235fcf104a3dbab9f9ba3834164b.fdaba13ef1aa6e8697903f03e0a7fc29 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-07T00:24:49,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/e8900aeda8b5454ea1cc00ef9ccf9678.6c4dec1bc7503af64d0f67bf7ec5a5f9 for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-07T00:24:49,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742122_1298 (size=269)
2024-12-07T00:24:49,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742122_1298 (size=269)
2024-12-07T00:24:49,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742122_1298 (size=269)
2024-12-07T00:24:49,934 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.
2024-12-07T00:24:49,934 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123
2024-12-07T00:24:49,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=123
2024-12-07T00:24:49,935 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,935 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=123, ppid=122, state=RUNNABLE; SnapshotRegionProcedure 45558a416e5cf43cb4193d8fc7875e25
2024-12-07T00:24:49,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=123, resume processing ppid=122
2024-12-07T00:24:49,944 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=123, ppid=122, state=SUCCESS; SnapshotRegionProcedure 45558a416e5cf43cb4193d8fc7875e25 in 170 msec
2024-12-07T00:24:49,944 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS
2024-12-07T00:24:49,945 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION
2024-12-07T00:24:49,946 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT
2024-12-07T00:24:49,946 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-07T00:24:49,947 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-07T00:24:49,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742123_1299 (size=670)
2024-12-07T00:24:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742123_1299 (size=670)
2024-12-07T00:24:49,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742123_1299 (size=670)
2024-12-07T00:24:49,980 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT
2024-12-07T00:24:49,986 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT
2024-12-07T00:24:49,987 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-07T00:24:49,988 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION
2024-12-07T00:24:49,988 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 122
2024-12-07T00:24:49,989 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=122, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 247 msec
2024-12-07T00:24:50,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=122
2024-12-07T00:24:50,046 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 122 completed
2024-12-07T00:24:50,046 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046
2024-12-07T00:24:50,046 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46157, tgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046, rawTgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45
2024-12-07T00:24:50,091 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45
2024-12-07T00:24:50,091 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-07T00:24:50,093 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity.
2024-12-07T00:24:50,099 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1
2024-12-07T00:24:50,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742124_1300 (size=216)
2024-12-07T00:24:50,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742124_1300 (size=216)
2024-12-07T00:24:50,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742124_1300 (size=216)
2024-12-07T00:24:50,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742125_1301 (size=670)
2024-12-07T00:24:50,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742125_1301 (size=670)
2024-12-07T00:24:50,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742125_1301 (size=670)
2024-12-07T00:24:50,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:50,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:50,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:50,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:50,215 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0005_000001 (auth:SIMPLE) from 127.0.0.1:46198
2024-12-07T00:24:50,231 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0005/container_1733530972728_0005_01_000001/launch_container.sh]
2024-12-07T00:24:50,232 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0005/container_1733530972728_0005_01_000001/container_tokens]
2024-12-07T00:24:50,232 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0005/container_1733530972728_0005_01_000001/sysfs]
2024-12-07T00:24:51,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-8019323272308534008.jar
2024-12-07T00:24:51,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:51,143 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:51,213 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-3837807474029298099.jar
2024-12-07T00:24:51,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:51,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:51,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:51,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:51,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:51,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar
2024-12-07T00:24:51,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar
2024-12-07T00:24:51,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar
2024-12-07T00:24:51,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar
2024-12-07T00:24:51,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar
2024-12-07T00:24:51,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar
2024-12-07T00:24:51,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar
2024-12-07T00:24:51,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar
2024-12-07T00:24:51,216 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar
2024-12-07T00:24:51,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar
2024-12-07T00:24:51,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar
2024-12-07T00:24:51,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar
2024-12-07T00:24:51,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar
2024-12-07T00:24:51,217 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-07T00:24:51,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-07T00:24:51,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-07T00:24:51,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-07T00:24:51,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar
2024-12-07T00:24:51,218 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-07T00:24:51,219 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar
2024-12-07T00:24:51,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742126_1302 (size=127628)
2024-12-07T00:24:51,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742126_1302 (size=127628)
2024-12-07T00:24:51,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742126_1302 (size=127628)
2024-12-07T00:24:51,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742127_1303 (size=2172101)
2024-12-07T00:24:51,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742127_1303 (size=2172101)
2024-12-07T00:24:51,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742127_1303 (size=2172101)
2024-12-07T00:24:51,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742128_1304 (size=213228)
2024-12-07T00:24:51,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742128_1304 (size=213228)
2024-12-07T00:24:51,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742128_1304 (size=213228)
2024-12-07T00:24:51,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742129_1305 (size=1877034)
2024-12-07T00:24:51,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742129_1305 (size=1877034)
2024-12-07T00:24:51,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742129_1305 (size=1877034)
2024-12-07T00:24:51,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742130_1306 (size=533455)
2024-12-07T00:24:51,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742130_1306 (size=533455)
2024-12-07T00:24:51,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742130_1306 (size=533455)
2024-12-07T00:24:51,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742131_1307 (size=7280644)
2024-12-07T00:24:51,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742131_1307 (size=7280644)
2024-12-07T00:24:51,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742131_1307 (size=7280644)
2024-12-07T00:24:51,379 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties
2024-12-07T00:24:51,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742132_1308 (size=4188619)
2024-12-07T00:24:51,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742132_1308 (size=4188619)
2024-12-07T00:24:51,380 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742132_1308 (size=4188619)
2024-12-07T00:24:51,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742133_1309 (size=20406)
2024-12-07T00:24:51,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742133_1309 (size=20406)
2024-12-07T00:24:51,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742133_1309 (size=20406)
2024-12-07T00:24:51,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742134_1310 (size=75495)
2024-12-07T00:24:51,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742134_1310 (size=75495)
2024-12-07T00:24:51,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742134_1310 (size=75495)
2024-12-07T00:24:51,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742135_1311 (size=45609)
2024-12-07T00:24:51,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742135_1311 (size=45609)
2024-12-07T00:24:51,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742135_1311 (size=45609)
2024-12-07T00:24:51,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742136_1312 (size=110084)
2024-12-07T00:24:51,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742136_1312 (size=110084)
2024-12-07T00:24:51,432 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742136_1312 (size=110084)
2024-12-07T00:24:51,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742137_1313 (size=6350154)
2024-12-07T00:24:51,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742137_1313 (size=6350154)
2024-12-07T00:24:51,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742137_1313 (size=6350154)
2024-12-07T00:24:51,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742138_1314 (size=1323991)
2024-12-07T00:24:51,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742138_1314 (size=1323991)
2024-12-07T00:24:51,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742138_1314 (size=1323991)
2024-12-07T00:24:51,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742139_1315 (size=23076)
2024-12-07T00:24:51,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742139_1315 (size=23076)
2024-12-07T00:24:51,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added
to blk_1073742139_1315 (size=23076) 2024-12-07T00:24:51,483 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742140_1316 (size=126803) 2024-12-07T00:24:51,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742140_1316 (size=126803) 2024-12-07T00:24:51,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742140_1316 (size=126803) 2024-12-07T00:24:51,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742141_1317 (size=322274) 2024-12-07T00:24:51,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742141_1317 (size=322274) 2024-12-07T00:24:51,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742141_1317 (size=322274) 2024-12-07T00:24:51,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742142_1318 (size=1832290) 2024-12-07T00:24:51,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742142_1318 (size=1832290) 2024-12-07T00:24:51,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742142_1318 (size=1832290) 2024-12-07T00:24:51,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742143_1319 (size=451756) 2024-12-07T00:24:51,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742143_1319 (size=451756) 2024-12-07T00:24:51,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742143_1319 (size=451756) 2024-12-07T00:24:51,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742144_1320 (size=30081) 2024-12-07T00:24:51,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742144_1320 (size=30081) 2024-12-07T00:24:51,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742144_1320 (size=30081) 2024-12-07T00:24:51,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742145_1321 (size=53616) 2024-12-07T00:24:51,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742145_1321 (size=53616) 2024-12-07T00:24:51,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742145_1321 (size=53616) 2024-12-07T00:24:51,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742146_1322 (size=29229) 2024-12-07T00:24:51,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added 
to blk_1073742146_1322 (size=29229) 2024-12-07T00:24:51,573 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742146_1322 (size=29229) 2024-12-07T00:24:51,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742147_1323 (size=169089) 2024-12-07T00:24:51,609 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742147_1323 (size=169089) 2024-12-07T00:24:51,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742147_1323 (size=169089) 2024-12-07T00:24:51,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742148_1324 (size=5175431) 2024-12-07T00:24:51,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742148_1324 (size=5175431) 2024-12-07T00:24:51,636 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742148_1324 (size=5175431) 2024-12-07T00:24:51,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742149_1325 (size=136454) 2024-12-07T00:24:51,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742149_1325 (size=136454) 2024-12-07T00:24:51,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742149_1325 (size=136454) 2024-12-07T00:24:51,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742150_1326 (size=907851) 2024-12-07T00:24:51,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742150_1326 (size=907851) 2024-12-07T00:24:51,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742150_1326 (size=907851) 2024-12-07T00:24:51,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742151_1327 (size=3317408) 2024-12-07T00:24:51,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742151_1327 (size=3317408) 2024-12-07T00:24:51,673 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742151_1327 (size=3317408) 2024-12-07T00:24:51,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742152_1328 (size=503880) 2024-12-07T00:24:51,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742152_1328 (size=503880) 2024-12-07T00:24:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742152_1328 (size=503880) 2024-12-07T00:24:51,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 
is added to blk_1073742153_1329 (size=4695811) 2024-12-07T00:24:51,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742153_1329 (size=4695811) 2024-12-07T00:24:51,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742153_1329 (size=4695811) 2024-12-07T00:24:51,717 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-12-07T00:24:51,720 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-12-07T00:24:51,722 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=9.7 K 2024-12-07T00:24:51,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742154_1330 (size=378) 2024-12-07T00:24:51,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742154_1330 (size=378) 2024-12-07T00:24:51,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742154_1330 (size=378) 2024-12-07T00:24:51,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742155_1331 (size=15) 2024-12-07T00:24:51,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742155_1331 (size=15) 2024-12-07T00:24:51,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742155_1331 (size=15) 2024-12-07T00:24:51,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742156_1332 (size=304940) 2024-12-07T00:24:51,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742156_1332 (size=304940) 2024-12-07T00:24:51,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742156_1332 (size=304940) 2024-12-07T00:24:51,793 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:24:51,793 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T00:24:52,215 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0006_000001 (auth:SIMPLE) from 127.0.0.1:46214 2024-12-07T00:24:54,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:24:54,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-12-07T00:24:54,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:24:54,378 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-12-07T00:24:54,379 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-12-07T00:24:58,408 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0006_000001 (auth:SIMPLE) from 127.0.0.1:49394 2024-12-07T00:24:58,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742157_1333 (size=350614) 2024-12-07T00:24:58,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742157_1333 (size=350614) 2024-12-07T00:24:58,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742157_1333 (size=350614) 2024-12-07T00:24:59,881 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:25:00,707 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0006_000001 (auth:SIMPLE) from 127.0.0.1:47082 2024-12-07T00:25:05,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742158_1334 (size=4945) 2024-12-07T00:25:05,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742158_1334 (size=4945) 2024-12-07T00:25:05,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742158_1334 (size=4945) 2024-12-07T00:25:05,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742159_1335 (size=4945) 2024-12-07T00:25:05,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742159_1335 (size=4945) 2024-12-07T00:25:05,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45899 is added to blk_1073742159_1335 (size=4945) 2024-12-07T00:25:05,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742160_1336 (size=17474) 2024-12-07T00:25:05,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742160_1336 (size=17474) 2024-12-07T00:25:05,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742160_1336 (size=17474) 2024-12-07T00:25:05,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742161_1337 (size=482) 2024-12-07T00:25:05,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742161_1337 (size=482) 2024-12-07T00:25:05,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742161_1337 (size=482) 2024-12-07T00:25:05,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742162_1338 (size=17474) 2024-12-07T00:25:05,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742162_1338 (size=17474) 2024-12-07T00:25:05,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742162_1338 (size=17474) 2024-12-07T00:25:05,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742163_1339 (size=350614) 2024-12-07T00:25:05,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742163_1339 (size=350614) 2024-12-07T00:25:05,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742163_1339 (size=350614) 2024-12-07T00:25:07,206 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:25:07,207 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
2024-12-07T00:25:07,214 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,214 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:25:07,214 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:25:07,214 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-07T00:25:07,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-07T00:25:07,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-12-07T00:25:07,215 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531090046/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-12-07T00:25:07,222 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,222 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=124, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,225 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T00:25:07,225 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531107225"}]},"ts":"1733531107225"} 2024-12-07T00:25:07,227 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-12-07T00:25:07,229 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-12-07T00:25:07,230 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=125, ppid=124, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-12-07T00:25:07,231 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45558a416e5cf43cb4193d8fc7875e25, UNASSIGN}] 2024-12-07T00:25:07,231 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=126, ppid=125, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45558a416e5cf43cb4193d8fc7875e25, UNASSIGN 2024-12-07T00:25:07,232 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=45558a416e5cf43cb4193d8fc7875e25, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:07,235 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:25:07,235 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE; CloseRegionProcedure 45558a416e5cf43cb4193d8fc7875e25, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:25:07,326 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T00:25:07,387 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:07,387 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(124): Close 45558a416e5cf43cb4193d8fc7875e25 2024-12-07T00:25:07,388 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:25:07,388 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1681): Closing 45558a416e5cf43cb4193d8fc7875e25, disabling compactions & flushes 2024-12-07T00:25:07,388 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25. 2024-12-07T00:25:07,388 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25. 
2024-12-07T00:25:07,388 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25. after waiting 0 ms 2024-12-07T00:25:07,388 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25. 2024-12-07T00:25:07,393 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-12-07T00:25:07,394 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:25:07,394 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25. 2024-12-07T00:25:07,394 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] regionserver.HRegion(1635): Region close journal for 45558a416e5cf43cb4193d8fc7875e25: 2024-12-07T00:25:07,396 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=127}] handler.UnassignRegionHandler(170): Closed 45558a416e5cf43cb4193d8fc7875e25 2024-12-07T00:25:07,396 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=126 updating hbase:meta row=45558a416e5cf43cb4193d8fc7875e25, regionState=CLOSED 2024-12-07T00:25:07,399 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=127, resume processing ppid=126 2024-12-07T00:25:07,399 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=127, ppid=126, state=SUCCESS; CloseRegionProcedure 45558a416e5cf43cb4193d8fc7875e25, server=6e6711bf2d40,36059,1733530964705 in 162 msec 2024-12-07T00:25:07,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=126, resume processing ppid=125 2024-12-07T00:25:07,400 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=126, ppid=125, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=45558a416e5cf43cb4193d8fc7875e25, UNASSIGN in 168 msec 2024-12-07T00:25:07,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=125, resume processing ppid=124 2024-12-07T00:25:07,402 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=125, ppid=124, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 172 msec 2024-12-07T00:25:07,403 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531107403"}]},"ts":"1733531107403"} 2024-12-07T00:25:07,404 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in 
hbase:meta 2024-12-07T00:25:07,406 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-12-07T00:25:07,408 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=124, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 185 msec 2024-12-07T00:25:07,527 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=124 2024-12-07T00:25:07,527 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 124 completed 2024-12-07T00:25:07,528 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,529 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,530 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=128, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,530 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,531 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=128, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,532 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,534 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25 2024-12-07T00:25:07,535 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:25:07,535 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:25:07,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-07T00:25:07,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-07T00:25:07,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-07T00:25:07,536 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-12-07T00:25:07,537 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/recovered.edits] 2024-12-07T00:25:07,537 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/recovered.edits] 2024-12-07T00:25:07,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:07,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:07,538 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/recovered.edits] 2024-12-07T00:25:07,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:07,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:07,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-07T00:25:07,539 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:07,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:07,543 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/cf/d2a9235fcf104a3dbab9f9ba3834164b to 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/cf/d2a9235fcf104a3dbab9f9ba3834164b 2024-12-07T00:25:07,543 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/cf/e8900aeda8b5454ea1cc00ef9ccf9678 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/cf/e8900aeda8b5454ea1cc00ef9ccf9678 2024-12-07T00:25:07,546 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/recovered.edits/8.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9/recovered.edits/8.seqid 2024-12-07T00:25:07,546 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/recovered.edits/8.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29/recovered.edits/8.seqid 2024-12-07T00:25:07,546 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/d2a9235fcf104a3dbab9f9ba3834164b.fdaba13ef1aa6e8697903f03e0a7fc29 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/d2a9235fcf104a3dbab9f9ba3834164b.fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:25:07,547 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:25:07,547 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fdaba13ef1aa6e8697903f03e0a7fc29 2024-12-07T00:25:07,548 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/e8900aeda8b5454ea1cc00ef9ccf9678.6c4dec1bc7503af64d0f67bf7ec5a5f9 to 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/cf/e8900aeda8b5454ea1cc00ef9ccf9678.6c4dec1bc7503af64d0f67bf7ec5a5f9 2024-12-07T00:25:07,551 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/recovered.edits/12.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25/recovered.edits/12.seqid 2024-12-07T00:25:07,551 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/45558a416e5cf43cb4193d8fc7875e25 2024-12-07T00:25:07,551 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-12-07T00:25:07,553 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=128, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,556 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-12-07T00:25:07,560 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-12-07T00:25:07,561 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=128, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,561 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-12-07T00:25:07,561 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531107561"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:07,563 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 1 regions from META 2024-12-07T00:25:07,563 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 45558a416e5cf43cb4193d8fc7875e25, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25.', STARTKEY => '', ENDKEY => ''}] 2024-12-07T00:25:07,563 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
2024-12-07T00:25:07,563 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531107563"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:07,565 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-12-07T00:25:07,566 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=128, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:07,567 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=128, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 37 msec 2024-12-07T00:25:07,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=128 2024-12-07T00:25:07,641 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1, procId: 128 completed 2024-12-07T00:25:07,641 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,642 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=129, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-07T00:25:07,645 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531107645"}]},"ts":"1733531107645"} 2024-12-07T00:25:07,646 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-12-07T00:25:07,658 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-12-07T00:25:07,658 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=130, ppid=129, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-12-07T00:25:07,660 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1dd98b42eeed044390eae525345b1c17, UNASSIGN}, {pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a7e461ebe7d31d10a19f22027b1fb153, UNASSIGN}] 2024-12-07T00:25:07,661 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=132, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; 
TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a7e461ebe7d31d10a19f22027b1fb153, UNASSIGN 2024-12-07T00:25:07,661 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=131, ppid=130, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1dd98b42eeed044390eae525345b1c17, UNASSIGN 2024-12-07T00:25:07,662 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=a7e461ebe7d31d10a19f22027b1fb153, regionState=CLOSING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:07,662 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=1dd98b42eeed044390eae525345b1c17, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:07,663 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:25:07,663 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE; CloseRegionProcedure a7e461ebe7d31d10a19f22027b1fb153, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:25:07,664 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:25:07,664 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=134, ppid=131, state=RUNNABLE; CloseRegionProcedure 1dd98b42eeed044390eae525345b1c17, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:25:07,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-07T00:25:07,815 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:07,816 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(124): Close a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:25:07,816 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:25:07,816 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:07,816 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1681): Closing a7e461ebe7d31d10a19f22027b1fb153, disabling compactions & flushes 2024-12-07T00:25:07,816 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:25:07,816 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 
2024-12-07T00:25:07,816 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. after waiting 0 ms 2024-12-07T00:25:07,816 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:25:07,816 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(124): Close 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:25:07,817 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:25:07,817 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1681): Closing 1dd98b42eeed044390eae525345b1c17, disabling compactions & flushes 2024-12-07T00:25:07,817 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 2024-12-07T00:25:07,817 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 2024-12-07T00:25:07,817 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. after waiting 0 ms 2024-12-07T00:25:07,817 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 2024-12-07T00:25:07,821 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:25:07,822 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:25:07,822 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:25:07,822 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17. 
2024-12-07T00:25:07,822 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] regionserver.HRegion(1635): Region close journal for 1dd98b42eeed044390eae525345b1c17: 2024-12-07T00:25:07,822 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:25:07,822 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153. 2024-12-07T00:25:07,822 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] regionserver.HRegion(1635): Region close journal for a7e461ebe7d31d10a19f22027b1fb153: 2024-12-07T00:25:07,823 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=134}] handler.UnassignRegionHandler(170): Closed 1dd98b42eeed044390eae525345b1c17 2024-12-07T00:25:07,824 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=131 updating hbase:meta row=1dd98b42eeed044390eae525345b1c17, regionState=CLOSED 2024-12-07T00:25:07,824 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=133}] handler.UnassignRegionHandler(170): Closed a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:25:07,824 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=132 updating hbase:meta row=a7e461ebe7d31d10a19f22027b1fb153, regionState=CLOSED 2024-12-07T00:25:07,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=134, resume processing ppid=131 2024-12-07T00:25:07,826 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=134, ppid=131, state=SUCCESS; CloseRegionProcedure 1dd98b42eeed044390eae525345b1c17, server=6e6711bf2d40,36059,1733530964705 in 161 msec 2024-12-07T00:25:07,828 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=131, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=1dd98b42eeed044390eae525345b1c17, UNASSIGN in 167 msec 2024-12-07T00:25:07,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=133, resume processing ppid=132 2024-12-07T00:25:07,828 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=133, ppid=132, state=SUCCESS; CloseRegionProcedure a7e461ebe7d31d10a19f22027b1fb153, server=6e6711bf2d40,43281,1733530964846 in 163 msec 2024-12-07T00:25:07,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=132, resume processing ppid=130 2024-12-07T00:25:07,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=132, ppid=130, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a7e461ebe7d31d10a19f22027b1fb153, UNASSIGN in 168 msec 2024-12-07T00:25:07,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=130, resume processing ppid=129 2024-12-07T00:25:07,831 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=130, ppid=129, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 171 msec 2024-12-07T00:25:07,832 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531107832"}]},"ts":"1733531107832"} 2024-12-07T00:25:07,833 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-12-07T00:25:07,835 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-12-07T00:25:07,836 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=129, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 193 msec 2024-12-07T00:25:07,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=129 2024-12-07T00:25:07,947 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 129 completed 2024-12-07T00:25:07,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,949 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=135, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,950 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=135, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,951 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,954 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17 2024-12-07T00:25:07,954 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:25:07,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,954 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,955 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-07T00:25:07,955 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-07T00:25:07,956 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-12-07T00:25:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,957 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data null 2024-12-07T00:25:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:07,957 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T00:25:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:07,957 DEBUG 
[HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/recovered.edits] 2024-12-07T00:25:07,957 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/recovered.edits] 2024-12-07T00:25:07,958 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-07T00:25:07,961 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/cf/409924b3fa254592ab7430a5da4779a5 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/cf/409924b3fa254592ab7430a5da4779a5 2024-12-07T00:25:07,961 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/cf/3b7a9031410f4c188a69a91cbd2d1989 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/cf/3b7a9031410f4c188a69a91cbd2d1989 2024-12-07T00:25:07,964 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153/recovered.edits/9.seqid 2024-12-07T00:25:07,964 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17/recovered.edits/9.seqid 2024-12-07T00:25:07,965 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/a7e461ebe7d31d10a19f22027b1fb153 2024-12-07T00:25:07,965 DEBUG 
[HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithMergeRegion/1dd98b42eeed044390eae525345b1c17 2024-12-07T00:25:07,965 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-12-07T00:25:07,967 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=135, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,969 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-12-07T00:25:07,971 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-12-07T00:25:07,972 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=135, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,972 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-12-07T00:25:07,972 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531107972"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:07,972 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531107972"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:07,973 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:25:07,973 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 1dd98b42eeed044390eae525345b1c17, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1733531086159.1dd98b42eeed044390eae525345b1c17.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a7e461ebe7d31d10a19f22027b1fb153, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1733531086159.a7e461ebe7d31d10a19f22027b1fb153.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:25:07,973 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
2024-12-07T00:25:07,973 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531107973"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:07,974 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-12-07T00:25:07,976 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=135, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:07,977 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=135, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 28 msec 2024-12-07T00:25:08,059 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=135 2024-12-07T00:25:08,059 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion, procId: 135 completed 2024-12-07T00:25:08,067 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-07T00:25:08,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:08,070 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" 2024-12-07T00:25:08,072 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:08,073 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" 2024-12-07T00:25:08,074 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:08,095 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=795 (was 781) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-34 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-4849 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) 
java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-31 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-30 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:36528 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-33 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-29 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-514840348_1 at /127.0.0.1:55390 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (41150622) connection to localhost/127.0.0.1:33965 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:33268 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) 
app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-32 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:55404 [Waiting for operation #8] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/6e6711bf2d40:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 344) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=790 (was 782) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=717 (was 736), ProcessCount=17 (was 18), AvailableMemoryMB=2889 (was 3068) 2024-12-07T00:25:08,095 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-07T00:25:08,112 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=795, OpenFileDescriptor=790, MaxFileDescriptor=1048576, SystemLoadAverage=717, ProcessCount=17, AvailableMemoryMB=2888 2024-12-07T00:25:08,112 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=795 is superior to 500 2024-12-07T00:25:08,114 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:25:08,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T00:25:08,115 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:25:08,115 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:08,115 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 136 2024-12-07T00:25:08,116 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:25:08,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T00:25:08,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742164_1340 (size=407) 2024-12-07T00:25:08,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742164_1340 (size=407) 2024-12-07T00:25:08,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742164_1340 (size=407) 2024-12-07T00:25:08,125 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f7a4e93ffe965017ee70266453a2811f, NAME => 'testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 
'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:08,125 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 1eccefe77eaed05be3d668802482d57a, NAME => 'testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:08,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742165_1341 (size=68) 2024-12-07T00:25:08,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742165_1341 (size=68) 2024-12-07T00:25:08,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742165_1341 (size=68) 2024-12-07T00:25:08,138 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:08,138 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing f7a4e93ffe965017ee70266453a2811f, disabling compactions & flushes 2024-12-07T00:25:08,138 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:08,138 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:08,138 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. after waiting 0 ms 2024-12-07T00:25:08,138 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:08,139 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 
2024-12-07T00:25:08,139 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for f7a4e93ffe965017ee70266453a2811f: 2024-12-07T00:25:08,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742166_1342 (size=68) 2024-12-07T00:25:08,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742166_1342 (size=68) 2024-12-07T00:25:08,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742166_1342 (size=68) 2024-12-07T00:25:08,142 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:08,142 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing 1eccefe77eaed05be3d668802482d57a, disabling compactions & flushes 2024-12-07T00:25:08,142 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:08,142 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:08,142 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. after waiting 0 ms 2024-12-07T00:25:08,142 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:08,142 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 
2024-12-07T00:25:08,142 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for 1eccefe77eaed05be3d668802482d57a: 2024-12-07T00:25:08,144 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:25:08,144 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733531108144"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531108144"}]},"ts":"1733531108144"} 2024-12-07T00:25:08,144 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1733531108144"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531108144"}]},"ts":"1733531108144"} 2024-12-07T00:25:08,146 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:25:08,147 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:25:08,147 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531108147"}]},"ts":"1733531108147"} 2024-12-07T00:25:08,148 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-07T00:25:08,152 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:25:08,153 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:25:08,153 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:25:08,153 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:25:08,153 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:25:08,153 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:25:08,153 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:25:08,153 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:25:08,154 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f7a4e93ffe965017ee70266453a2811f, ASSIGN}, {pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1eccefe77eaed05be3d668802482d57a, ASSIGN}] 2024-12-07T00:25:08,154 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure 
table=testtb-testExportExpiredSnapshot, region=1eccefe77eaed05be3d668802482d57a, ASSIGN 2024-12-07T00:25:08,155 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f7a4e93ffe965017ee70266453a2811f, ASSIGN 2024-12-07T00:25:08,155 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=138, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1eccefe77eaed05be3d668802482d57a, ASSIGN; state=OFFLINE, location=6e6711bf2d40,43281,1733530964846; forceNewPlan=false, retain=false 2024-12-07T00:25:08,156 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=137, ppid=136, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f7a4e93ffe965017ee70266453a2811f, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:25:08,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T00:25:08,306 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:25:08,306 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=f7a4e93ffe965017ee70266453a2811f, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:08,306 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=1eccefe77eaed05be3d668802482d57a, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:08,308 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=139, ppid=137, state=RUNNABLE; OpenRegionProcedure f7a4e93ffe965017ee70266453a2811f, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:25:08,308 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=140, ppid=138, state=RUNNABLE; OpenRegionProcedure 1eccefe77eaed05be3d668802482d57a, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:25:08,418 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T00:25:08,459 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:08,459 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:08,462 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 
2024-12-07T00:25:08,463 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7285): Opening region: {ENCODED => 1eccefe77eaed05be3d668802482d57a, NAME => 'testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:25:08,463 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(135): Open testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:08,463 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. service=AccessControlService 2024-12-07T00:25:08,463 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7285): Opening region: {ENCODED => f7a4e93ffe965017ee70266453a2811f, NAME => 'testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:25:08,463 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:25:08,463 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. service=AccessControlService 2024-12-07T00:25:08,463 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,463 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:08,463 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:25:08,464 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7327): checking encryption for 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,464 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,464 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(7330): checking classloading for 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,464 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(894): Instantiated testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:08,464 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7327): checking encryption for f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,464 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(7330): checking classloading for f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,465 INFO [StoreOpener-1eccefe77eaed05be3d668802482d57a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,465 INFO [StoreOpener-f7a4e93ffe965017ee70266453a2811f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,467 INFO [StoreOpener-f7a4e93ffe965017ee70266453a2811f-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f7a4e93ffe965017ee70266453a2811f columnFamilyName cf 2024-12-07T00:25:08,467 DEBUG [StoreOpener-f7a4e93ffe965017ee70266453a2811f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:08,467 INFO [StoreOpener-1eccefe77eaed05be3d668802482d57a-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1eccefe77eaed05be3d668802482d57a columnFamilyName cf 2024-12-07T00:25:08,467 DEBUG [StoreOpener-1eccefe77eaed05be3d668802482d57a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:08,467 INFO [StoreOpener-f7a4e93ffe965017ee70266453a2811f-1 {}] regionserver.HStore(327): Store=f7a4e93ffe965017ee70266453a2811f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:25:08,467 INFO [StoreOpener-1eccefe77eaed05be3d668802482d57a-1 {}] regionserver.HStore(327): Store=1eccefe77eaed05be3d668802482d57a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:25:08,468 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,468 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,468 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,468 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,471 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1085): writing seq id for f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,471 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1085): writing seq id for 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,473 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:25:08,473 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1102): 
Opened f7a4e93ffe965017ee70266453a2811f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67678773, jitterRate=0.008492305874824524}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:25:08,474 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:25:08,474 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1102): Opened 1eccefe77eaed05be3d668802482d57a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=74296909, jitterRate=0.10711021721363068}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:25:08,474 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegion(1001): Region open journal for f7a4e93ffe965017ee70266453a2811f: 2024-12-07T00:25:08,474 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegion(1001): Region open journal for 1eccefe77eaed05be3d668802482d57a: 2024-12-07T00:25:08,475 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a., pid=140, masterSystemTime=1733531108459 2024-12-07T00:25:08,475 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f., pid=139, masterSystemTime=1733531108459 2024-12-07T00:25:08,476 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:08,476 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=139}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:08,477 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=137 updating hbase:meta row=f7a4e93ffe965017ee70266453a2811f, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:08,477 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:08,477 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=140}] handler.AssignRegionHandler(164): Opened testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 
2024-12-07T00:25:08,477 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=138 updating hbase:meta row=1eccefe77eaed05be3d668802482d57a, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:08,480 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=139, resume processing ppid=137 2024-12-07T00:25:08,480 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=139, ppid=137, state=SUCCESS; OpenRegionProcedure f7a4e93ffe965017ee70266453a2811f, server=6e6711bf2d40,33095,1733530964767 in 171 msec 2024-12-07T00:25:08,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=140, resume processing ppid=138 2024-12-07T00:25:08,480 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=140, ppid=138, state=SUCCESS; OpenRegionProcedure 1eccefe77eaed05be3d668802482d57a, server=6e6711bf2d40,43281,1733530964846 in 171 msec 2024-12-07T00:25:08,481 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=137, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f7a4e93ffe965017ee70266453a2811f, ASSIGN in 326 msec 2024-12-07T00:25:08,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=138, resume processing ppid=136 2024-12-07T00:25:08,483 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=138, ppid=136, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1eccefe77eaed05be3d668802482d57a, ASSIGN in 326 msec 2024-12-07T00:25:08,483 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:25:08,483 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531108483"}]},"ts":"1733531108483"} 2024-12-07T00:25:08,484 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-07T00:25:08,487 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=136, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:25:08,487 DEBUG [PEWorker-2 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-12-07T00:25:08,489 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T00:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:08,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:08,492 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:08,493 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=136, state=SUCCESS; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 378 msec 2024-12-07T00:25:08,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=136 2024-12-07T00:25:08,720 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 136 completed 2024-12-07T00:25:08,720 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportExpiredSnapshot get assigned. Timeout = 60000ms 2024-12-07T00:25:08,720 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:08,723 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-07T00:25:08,723 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:08,723 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportExpiredSnapshot assigned. 2024-12-07T00:25:08,726 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-07T00:25:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531108726 (current time:1733531108726). 
2024-12-07T00:25:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:25:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-07T00:25:08,726 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:25:08,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0ec0ab4a to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4ff1d4cc 2024-12-07T00:25:08,730 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@196f8a3a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:08,732 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:08,733 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45820, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0ec0ab4a to 127.0.0.1:60354 2024-12-07T00:25:08,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:08,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x04940477 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@4582e79d 2024-12-07T00:25:08,739 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@20598b4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:08,741 DEBUG [hconnection-0x4d97fb4-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:08,742 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45832, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:08,743 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:08,744 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59750, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x04940477 to 127.0.0.1:60354 2024-12-07T00:25:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:08,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T00:25:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:25:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=141, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-07T00:25:08,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-07T00:25:08,747 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:25:08,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-07T00:25:08,748 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:25:08,750 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:25:08,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742167_1343 (size=170) 2024-12-07T00:25:08,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742167_1343 (size=170) 2024-12-07T00:25:08,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742167_1343 (size=170) 2024-12-07T00:25:08,757 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:25:08,757 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure f7a4e93ffe965017ee70266453a2811f}, {pid=143, ppid=141, state=RUNNABLE; 
SnapshotRegionProcedure 1eccefe77eaed05be3d668802482d57a}] 2024-12-07T00:25:08,758 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,758 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-07T00:25:08,909 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:08,909 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:08,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-12-07T00:25:08,910 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=143 2024-12-07T00:25:08,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:08,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:08,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2538): Flush status journal for f7a4e93ffe965017ee70266453a2811f: 2024-12-07T00:25:08,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.HRegion(2538): Flush status journal for 1eccefe77eaed05be3d668802482d57a: 2024-12-07T00:25:08,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-07T00:25:08,910 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-12-07T00:25:08,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-07T00:25:08,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:08,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-12-07T00:25:08,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:25:08,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:08,911 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:25:08,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742168_1344 (size=71) 2024-12-07T00:25:08,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742168_1344 (size=71) 2024-12-07T00:25:08,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742168_1344 (size=71) 2024-12-07T00:25:08,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 
2024-12-07T00:25:08,922 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=143}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=143 2024-12-07T00:25:08,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=143 2024-12-07T00:25:08,923 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,923 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=143, ppid=141, state=RUNNABLE; SnapshotRegionProcedure 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:08,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742169_1345 (size=71) 2024-12-07T00:25:08,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742169_1345 (size=71) 2024-12-07T00:25:08,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742169_1345 (size=71) 2024-12-07T00:25:08,926 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=143, ppid=141, state=SUCCESS; SnapshotRegionProcedure 1eccefe77eaed05be3d668802482d57a in 167 msec 2024-12-07T00:25:08,926 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 
2024-12-07T00:25:08,927 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-12-07T00:25:08,927 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=142 2024-12-07T00:25:08,927 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,927 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=142, ppid=141, state=RUNNABLE; SnapshotRegionProcedure f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:08,929 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=142, resume processing ppid=141 2024-12-07T00:25:08,929 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=142, ppid=141, state=SUCCESS; SnapshotRegionProcedure f7a4e93ffe965017ee70266453a2811f in 171 msec 2024-12-07T00:25:08,929 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:25:08,930 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:25:08,931 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:25:08,931 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-12-07T00:25:08,931 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-12-07T00:25:08,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742170_1346 (size=552) 2024-12-07T00:25:08,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742170_1346 (size=552) 2024-12-07T00:25:08,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742170_1346 (size=552) 2024-12-07T00:25:08,959 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:25:08,963 INFO 
[PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:25:08,963 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-12-07T00:25:08,964 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=141, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:25:08,965 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 141 2024-12-07T00:25:08,966 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=141, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=141, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 219 msec 2024-12-07T00:25:09,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=141 2024-12-07T00:25:09,050 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 141 completed 2024-12-07T00:25:09,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:25:09,062 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33095 {}] regionserver.HRegion(8254): writing data to region testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:25:09,066 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-12-07T00:25:09,066 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:09,066 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:09,092 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-07T00:25:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531109092 (current time:1733531109092). 
2024-12-07T00:25:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:25:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-07T00:25:09,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:25:09,094 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x1e492f85 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@51a2c727 2024-12-07T00:25:09,106 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@223cf4a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:09,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:09,109 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45836, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:09,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x1e492f85 to 127.0.0.1:60354 2024-12-07T00:25:09,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:09,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x31b7da6a to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@766ba884 2024-12-07T00:25:09,117 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33b7c306, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:09,119 DEBUG [hconnection-0x7a0a55a6-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:09,120 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45844, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:09,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:09,122 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59760, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:09,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper 
connection 0x31b7da6a to 127.0.0.1:60354 2024-12-07T00:25:09,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:09,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T00:25:09,124 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:25:09,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=144, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-12-07T00:25:09,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-07T00:25:09,126 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:25:09,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T00:25:09,127 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:25:09,129 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:25:09,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742171_1347 (size=165) 2024-12-07T00:25:09,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742171_1347 (size=165) 2024-12-07T00:25:09,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742171_1347 (size=165) 2024-12-07T00:25:09,136 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:25:09,136 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure f7a4e93ffe965017ee70266453a2811f}, {pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 
1eccefe77eaed05be3d668802482d57a}] 2024-12-07T00:25:09,137 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:09,137 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:09,227 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T00:25:09,288 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:09,288 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:09,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=145 2024-12-07T00:25:09,289 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=146 2024-12-07T00:25:09,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:09,289 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 
2024-12-07T00:25:09,289 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2837): Flushing f7a4e93ffe965017ee70266453a2811f 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-07T00:25:09,289 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2837): Flushing 1eccefe77eaed05be3d668802482d57a 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-07T00:25:09,307 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/.tmp/cf/ff61afc9c400429aa9f9d80dca814fb7 is 71, key is 167b9a15f3a6f2a3850a23c64d29a10c/cf:q/1733531109062/Put/seqid=0 2024-12-07T00:25:09,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742172_1348 (size=8188) 2024-12-07T00:25:09,312 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/.tmp/cf/654acdb3fad64742b543dbc11ffbc036 is 71, key is 02c4f47c969afb8a4951b8c5b8595875/cf:q/1733531109062/Put/seqid=0 2024-12-07T00:25:09,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742172_1348 (size=8188) 2024-12-07T00:25:09,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742172_1348 (size=8188) 2024-12-07T00:25:09,315 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/.tmp/cf/ff61afc9c400429aa9f9d80dca814fb7 2024-12-07T00:25:09,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742173_1349 (size=5422) 2024-12-07T00:25:09,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742173_1349 (size=5422) 2024-12-07T00:25:09,321 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742173_1349 (size=5422) 2024-12-07T00:25:09,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/.tmp/cf/ff61afc9c400429aa9f9d80dca814fb7 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/cf/ff61afc9c400429aa9f9d80dca814fb7 2024-12-07T00:25:09,327 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/cf/ff61afc9c400429aa9f9d80dca814fb7, entries=45, sequenceid=6, filesize=8.0 K 2024-12-07T00:25:09,328 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 1eccefe77eaed05be3d668802482d57a in 39ms, sequenceid=6, compaction requested=false 2024-12-07T00:25:09,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-12-07T00:25:09,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.HRegion(2538): Flush status journal for 1eccefe77eaed05be3d668802482d57a: 2024-12-07T00:25:09,328 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. for snaptb0-testExportExpiredSnapshot completed. 2024-12-07T00:25:09,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-07T00:25:09,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:09,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/cf/ff61afc9c400429aa9f9d80dca814fb7] hfiles 2024-12-07T00:25:09,329 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/cf/ff61afc9c400429aa9f9d80dca814fb7 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-07T00:25:09,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742174_1350 (size=110) 2024-12-07T00:25:09,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742174_1350 (size=110) 2024-12-07T00:25:09,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742174_1350 (size=110) 2024-12-07T00:25:09,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on 
testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:09,336 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=146}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=146 2024-12-07T00:25:09,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=146 2024-12-07T00:25:09,336 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:09,336 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=146, ppid=144, state=RUNNABLE; SnapshotRegionProcedure 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:09,338 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=146, ppid=144, state=SUCCESS; SnapshotRegionProcedure 1eccefe77eaed05be3d668802482d57a in 201 msec 2024-12-07T00:25:09,428 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T00:25:09,722 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/.tmp/cf/654acdb3fad64742b543dbc11ffbc036 2024-12-07T00:25:09,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/.tmp/cf/654acdb3fad64742b543dbc11ffbc036 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/cf/654acdb3fad64742b543dbc11ffbc036 2024-12-07T00:25:09,729 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T00:25:09,733 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/cf/654acdb3fad64742b543dbc11ffbc036, entries=5, sequenceid=6, filesize=5.3 K 2024-12-07T00:25:09,734 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for f7a4e93ffe965017ee70266453a2811f in 445ms, sequenceid=6, compaction requested=false 2024-12-07T00:25:09,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.HRegion(2538): Flush status journal for f7a4e93ffe965017ee70266453a2811f: 2024-12-07T00:25:09,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. for snaptb0-testExportExpiredSnapshot completed. 2024-12-07T00:25:09,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-07T00:25:09,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:09,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/cf/654acdb3fad64742b543dbc11ffbc036] hfiles 2024-12-07T00:25:09,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/cf/654acdb3fad64742b543dbc11ffbc036 for snapshot=snaptb0-testExportExpiredSnapshot 2024-12-07T00:25:09,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742175_1351 (size=110) 2024-12-07T00:25:09,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742175_1351 (size=110) 2024-12-07T00:25:09,741 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742175_1351 (size=110) 2024-12-07T00:25:09,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 
2024-12-07T00:25:09,742 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=145}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=145 2024-12-07T00:25:09,742 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=145 2024-12-07T00:25:09,742 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:09,743 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=145, ppid=144, state=RUNNABLE; SnapshotRegionProcedure f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:09,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=145, resume processing ppid=144 2024-12-07T00:25:09,745 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=145, ppid=144, state=SUCCESS; SnapshotRegionProcedure f7a4e93ffe965017ee70266453a2811f in 608 msec 2024-12-07T00:25:09,745 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:25:09,746 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:25:09,746 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:25:09,747 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-12-07T00:25:09,747 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-12-07T00:25:09,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742176_1352 (size=630) 2024-12-07T00:25:09,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742176_1352 (size=630) 2024-12-07T00:25:09,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742176_1352 (size=630) 2024-12-07T00:25:09,768 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:25:09,784 INFO [PEWorker-1 {}] 
procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:25:09,784 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-12-07T00:25:09,786 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=144, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:25:09,786 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 144 2024-12-07T00:25:09,787 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=144, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=144, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 662 msec 2024-12-07T00:25:10,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=144 2024-12-07T00:25:10,231 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot, procId: 144 completed 2024-12-07T00:25:10,233 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:25:10,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testExportExpiredSnapshot 2024-12-07T00:25:10,235 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:25:10,235 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:10,236 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 147 2024-12-07T00:25:10,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-07T00:25:10,240 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:25:10,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742177_1353 (size=400) 2024-12-07T00:25:10,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742177_1353 (size=400) 2024-12-07T00:25:10,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742177_1353 (size=400) 2024-12-07T00:25:10,263 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => aa44abbb59514f8b493e3d258a51d1fb, NAME => 'testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:10,265 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => c613c7c0d4bf4a8124f2c33a8aabebf4, NAME => 'testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742178_1354 (size=61) 2024-12-07T00:25:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742178_1354 (size=61) 2024-12-07T00:25:10,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742178_1354 (size=61) 2024-12-07T00:25:10,296 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:10,296 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1681): Closing c613c7c0d4bf4a8124f2c33a8aabebf4, 
disabling compactions & flushes 2024-12-07T00:25:10,296 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:25:10,296 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:25:10,296 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. after waiting 0 ms 2024-12-07T00:25:10,296 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:25:10,296 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:25:10,296 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1635): Region close journal for c613c7c0d4bf4a8124f2c33a8aabebf4: 2024-12-07T00:25:10,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742179_1355 (size=61) 2024-12-07T00:25:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742179_1355 (size=61) 2024-12-07T00:25:10,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742179_1355 (size=61) 2024-12-07T00:25:10,306 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:10,306 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1681): Closing aa44abbb59514f8b493e3d258a51d1fb, disabling compactions & flushes 2024-12-07T00:25:10,306 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:25:10,306 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:25:10,306 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. after waiting 0 ms 2024-12-07T00:25:10,306 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:25:10,306 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 
2024-12-07T00:25:10,306 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1635): Region close journal for aa44abbb59514f8b493e3d258a51d1fb: 2024-12-07T00:25:10,307 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:25:10,307 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733531110307"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531110307"}]},"ts":"1733531110307"} 2024-12-07T00:25:10,307 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1733531110307"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531110307"}]},"ts":"1733531110307"} 2024-12-07T00:25:10,310 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:25:10,310 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:25:10,311 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531110311"}]},"ts":"1733531110311"} 2024-12-07T00:25:10,315 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-12-07T00:25:10,319 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:25:10,321 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:25:10,321 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:25:10,321 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:25:10,321 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:25:10,321 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:25:10,321 INFO [PEWorker-3 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:25:10,321 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:25:10,321 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=aa44abbb59514f8b493e3d258a51d1fb, ASSIGN}, {pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c613c7c0d4bf4a8124f2c33a8aabebf4, ASSIGN}] 2024-12-07T00:25:10,322 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, 
region=c613c7c0d4bf4a8124f2c33a8aabebf4, ASSIGN 2024-12-07T00:25:10,322 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=aa44abbb59514f8b493e3d258a51d1fb, ASSIGN 2024-12-07T00:25:10,323 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=148, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=aa44abbb59514f8b493e3d258a51d1fb, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:25:10,323 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=149, ppid=147, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c613c7c0d4bf4a8124f2c33a8aabebf4, ASSIGN; state=OFFLINE, location=6e6711bf2d40,43281,1733530964846; forceNewPlan=false, retain=false 2024-12-07T00:25:10,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-07T00:25:10,474 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:25:10,474 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=c613c7c0d4bf4a8124f2c33a8aabebf4, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:10,474 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=aa44abbb59514f8b493e3d258a51d1fb, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:10,477 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=150, ppid=149, state=RUNNABLE; OpenRegionProcedure c613c7c0d4bf4a8124f2c33a8aabebf4, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:25:10,480 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=151, ppid=148, state=RUNNABLE; OpenRegionProcedure aa44abbb59514f8b493e3d258a51d1fb, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:25:10,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-07T00:25:10,606 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0006/container_1733530972728_0006_01_000002/launch_container.sh] 2024-12-07T00:25:10,606 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0006/container_1733530972728_0006_01_000002/container_tokens] 
2024-12-07T00:25:10,606 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0006/container_1733530972728_0006_01_000002/sysfs] 2024-12-07T00:25:10,630 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:10,633 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:10,634 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:25:10,634 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7285): Opening region: {ENCODED => c613c7c0d4bf4a8124f2c33a8aabebf4, NAME => 'testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:25:10,634 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. service=AccessControlService 2024-12-07T00:25:10,635 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:25:10,635 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:10,635 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:10,635 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7327): checking encryption for c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:10,635 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(7330): checking classloading for c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:10,637 INFO [StoreOpener-c613c7c0d4bf4a8124f2c33a8aabebf4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:10,638 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(135): Open testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 
2024-12-07T00:25:10,638 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7285): Opening region: {ENCODED => aa44abbb59514f8b493e3d258a51d1fb, NAME => 'testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:25:10,639 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7999): Registered coprocessor service: region=testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. service=AccessControlService 2024-12-07T00:25:10,639 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:25:10,639 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:10,639 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(894): Instantiated testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:10,639 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7327): checking encryption for aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:10,639 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(7330): checking classloading for aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:10,641 INFO [StoreOpener-c613c7c0d4bf4a8124f2c33a8aabebf4-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c613c7c0d4bf4a8124f2c33a8aabebf4 columnFamilyName cf 2024-12-07T00:25:10,641 INFO [StoreOpener-aa44abbb59514f8b493e3d258a51d1fb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:10,641 DEBUG [StoreOpener-c613c7c0d4bf4a8124f2c33a8aabebf4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:10,643 INFO [StoreOpener-aa44abbb59514f8b493e3d258a51d1fb-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa44abbb59514f8b493e3d258a51d1fb columnFamilyName cf 2024-12-07T00:25:10,643 DEBUG [StoreOpener-aa44abbb59514f8b493e3d258a51d1fb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:10,643 INFO [StoreOpener-aa44abbb59514f8b493e3d258a51d1fb-1 {}] regionserver.HStore(327): Store=aa44abbb59514f8b493e3d258a51d1fb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:25:10,644 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:10,644 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:10,646 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1085): writing seq id for aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:10,648 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:25:10,648 INFO [StoreOpener-c613c7c0d4bf4a8124f2c33a8aabebf4-1 {}] regionserver.HStore(327): Store=c613c7c0d4bf4a8124f2c33a8aabebf4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:25:10,649 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:10,649 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1102): Opened aa44abbb59514f8b493e3d258a51d1fb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=58766252, jitterRate=-0.12431460618972778}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:25:10,649 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegion(1001): Region open journal for aa44abbb59514f8b493e3d258a51d1fb: 
2024-12-07T00:25:10,650 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:10,651 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1085): writing seq id for c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:10,653 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:25:10,654 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1102): Opened c613c7c0d4bf4a8124f2c33a8aabebf4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70100344, jitterRate=0.04457652568817139}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:25:10,654 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegion(1001): Region open journal for c613c7c0d4bf4a8124f2c33a8aabebf4: 2024-12-07T00:25:10,655 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4., pid=150, masterSystemTime=1733531110630 2024-12-07T00:25:10,657 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:25:10,657 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=150}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:25:10,657 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=149 updating hbase:meta row=c613c7c0d4bf4a8124f2c33a8aabebf4, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:10,659 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2601): Post open deploy tasks for testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb., pid=151, masterSystemTime=1733531110633 2024-12-07T00:25:10,661 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] regionserver.HRegionServer(2628): Finished post open deploy task for testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:25:10,661 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=151}] handler.AssignRegionHandler(164): Opened testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 
2024-12-07T00:25:10,662 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=148 updating hbase:meta row=aa44abbb59514f8b493e3d258a51d1fb, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:10,663 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=150, resume processing ppid=149 2024-12-07T00:25:10,663 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=150, ppid=149, state=SUCCESS; OpenRegionProcedure c613c7c0d4bf4a8124f2c33a8aabebf4, server=6e6711bf2d40,43281,1733530964846 in 182 msec 2024-12-07T00:25:10,665 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=149, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=c613c7c0d4bf4a8124f2c33a8aabebf4, ASSIGN in 342 msec 2024-12-07T00:25:10,667 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=151, resume processing ppid=148 2024-12-07T00:25:10,667 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=151, ppid=148, state=SUCCESS; OpenRegionProcedure aa44abbb59514f8b493e3d258a51d1fb, server=6e6711bf2d40,36059,1733530964705 in 184 msec 2024-12-07T00:25:10,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=148, resume processing ppid=147 2024-12-07T00:25:10,668 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=148, ppid=147, state=SUCCESS; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=aa44abbb59514f8b493e3d258a51d1fb, ASSIGN in 346 msec 2024-12-07T00:25:10,669 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:25:10,670 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531110669"}]},"ts":"1733531110669"} 2024-12-07T00:25:10,671 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-12-07T00:25:10,674 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=147, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:25:10,675 DEBUG [PEWorker-1 {}] access.PermissionStorage(175): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-12-07T00:25:10,677 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T00:25:10,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:10,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:10,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:10,679 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:10,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:10,682 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:10,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:10,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:10,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:10,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:10,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:10,683 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:10,685 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=147, state=SUCCESS; CreateTableProcedure table=testExportExpiredSnapshot in 450 msec 2024-12-07T00:25:10,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=147 2024-12-07T00:25:10,841 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testExportExpiredSnapshot, procId: 147 completed 2024-12-07T00:25:10,841 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testExportExpiredSnapshot get assigned. 
Timeout = 60000ms 2024-12-07T00:25:10,841 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:10,845 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testExportExpiredSnapshot assigned to meta. Checking AM states. 2024-12-07T00:25:10,845 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:10,845 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testExportExpiredSnapshot assigned. 2024-12-07T00:25:10,853 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:25:10,855 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.HRegion(8254): writing data to region testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:25:10,858 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testExportExpiredSnapshot 2024-12-07T00:25:10,858 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:25:10,858 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:10,868 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-07T00:25:10,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-12-07T00:25:10,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:25:10,869 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0371d0d7 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@758fed3b 2024-12-07T00:25:10,884 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1cf55a2e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:10,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:10,887 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45852, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:10,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0371d0d7 to 127.0.0.1:60354 2024-12-07T00:25:10,889 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:10,890 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x2e69a1f2 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@24857731 2024-12-07T00:25:10,895 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-12-07T00:25:10,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12977da5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:10,900 DEBUG [hconnection-0x286fff0a-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:10,901 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45856, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:10,903 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:10,904 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59774, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:10,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2e69a1f2 to 127.0.0.1:60354 2024-12-07T00:25:10,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:10,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-12-07T00:25:10,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-07T00:25:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=152, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-12-07T00:25:10,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-07T00:25:10,908 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:25:10,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T00:25:10,909 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:25:10,911 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:25:10,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742180_1356 (size=152) 2024-12-07T00:25:10,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742180_1356 (size=152) 2024-12-07T00:25:10,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742180_1356 (size=152) 2024-12-07T00:25:10,925 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:25:10,925 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure aa44abbb59514f8b493e3d258a51d1fb}, {pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure c613c7c0d4bf4a8124f2c33a8aabebf4}] 2024-12-07T00:25:10,926 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:10,926 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:11,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=152 2024-12-07T00:25:11,077 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:11,077 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:11,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=154 2024-12-07T00:25:11,078 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=153 2024-12-07T00:25:11,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:25:11,078 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:25:11,078 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2837): Flushing aa44abbb59514f8b493e3d258a51d1fb 1/1 column families, dataSize=65 B heapSize=400 B 2024-12-07T00:25:11,079 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2837): Flushing c613c7c0d4bf4a8124f2c33a8aabebf4 1/1 column families, dataSize=3.19 KB heapSize=7.14 KB 2024-12-07T00:25:11,103 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/.tmp/cf/1386f0a043104e94a699016bb791ceb1 is 71, key is 11893f1d62eaefaf95cb3fdd6c07500b/cf:q/1733531110855/Put/seqid=0 2024-12-07T00:25:11,111 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/.tmp/cf/a169914c046145d69775d65f4577b933 is 69, key is 03d4a2c03b6a0b3a3db54dfea5ede09f9/cf:q/1733531110853/Put/seqid=0 2024-12-07T00:25:11,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742181_1357 (size=8460) 2024-12-07T00:25:11,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742181_1357 (size=8460) 2024-12-07T00:25:11,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742181_1357 (size=8460) 2024-12-07T00:25:11,117 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.19 KB at sequenceid=5 (bloomFilter=true), 
to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/.tmp/cf/1386f0a043104e94a699016bb791ceb1 2024-12-07T00:25:11,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/.tmp/cf/1386f0a043104e94a699016bb791ceb1 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/cf/1386f0a043104e94a699016bb791ceb1 2024-12-07T00:25:11,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742182_1358 (size=5149) 2024-12-07T00:25:11,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742182_1358 (size=5149) 2024-12-07T00:25:11,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742182_1358 (size=5149) 2024-12-07T00:25:11,128 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/.tmp/cf/a169914c046145d69775d65f4577b933 2024-12-07T00:25:11,130 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/cf/1386f0a043104e94a699016bb791ceb1, entries=49, sequenceid=5, filesize=8.3 K 2024-12-07T00:25:11,131 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(3040): Finished flush of dataSize ~3.19 KB/3271, heapSize ~7.13 KB/7296, currentSize=0 B/0 for c613c7c0d4bf4a8124f2c33a8aabebf4 in 53ms, sequenceid=5, compaction requested=false 2024-12-07T00:25:11,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.HRegion(2538): Flush status journal for c613c7c0d4bf4a8124f2c33a8aabebf4: 2024-12-07T00:25:11,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. for snapshot-testExportExpiredSnapshot completed. 2024-12-07T00:25:11,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-07T00:25:11,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:11,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/cf/1386f0a043104e94a699016bb791ceb1] hfiles 2024-12-07T00:25:11,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/cf/1386f0a043104e94a699016bb791ceb1 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-07T00:25:11,134 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/.tmp/cf/a169914c046145d69775d65f4577b933 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/cf/a169914c046145d69775d65f4577b933 2024-12-07T00:25:11,139 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/cf/a169914c046145d69775d65f4577b933, entries=1, sequenceid=5, filesize=5.0 K 2024-12-07T00:25:11,139 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(3040): Finished flush of dataSize ~65 B/65, heapSize ~384 B/384, currentSize=0 B/0 for aa44abbb59514f8b493e3d258a51d1fb in 61ms, sequenceid=5, compaction requested=false 2024-12-07T00:25:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.HRegion(2538): Flush status journal for aa44abbb59514f8b493e3d258a51d1fb: 2024-12-07T00:25:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. for snapshot-testExportExpiredSnapshot completed. 2024-12-07T00:25:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-12-07T00:25:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/cf/a169914c046145d69775d65f4577b933] hfiles 2024-12-07T00:25:11,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/cf/a169914c046145d69775d65f4577b933 for snapshot=snapshot-testExportExpiredSnapshot 2024-12-07T00:25:11,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742183_1359 (size=103) 2024-12-07T00:25:11,142 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742183_1359 (size=103) 2024-12-07T00:25:11,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742183_1359 (size=103) 2024-12-07T00:25:11,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 
2024-12-07T00:25:11,143 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=154}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=154 2024-12-07T00:25:11,144 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=154 2024-12-07T00:25:11,144 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:11,144 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=154, ppid=152, state=RUNNABLE; SnapshotRegionProcedure c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:25:11,146 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=154, ppid=152, state=SUCCESS; SnapshotRegionProcedure c613c7c0d4bf4a8124f2c33a8aabebf4 in 220 msec 2024-12-07T00:25:11,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742184_1360 (size=103) 2024-12-07T00:25:11,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742184_1360 (size=103) 2024-12-07T00:25:11,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742184_1360 (size=103) 2024-12-07T00:25:11,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:25:11,157 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=153}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=153 2024-12-07T00:25:11,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=153 2024-12-07T00:25:11,157 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:11,158 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=153, ppid=152, state=RUNNABLE; SnapshotRegionProcedure aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:25:11,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=153, resume processing ppid=152 2024-12-07T00:25:11,159 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=153, ppid=152, state=SUCCESS; SnapshotRegionProcedure aa44abbb59514f8b493e3d258a51d1fb in 233 msec 2024-12-07T00:25:11,159 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:25:11,160 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH 
ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:25:11,161 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:25:11,161 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-12-07T00:25:11,161 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-07T00:25:11,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742185_1361 (size=609) 2024-12-07T00:25:11,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742185_1361 (size=609) 2024-12-07T00:25:11,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742185_1361 (size=609) 2024-12-07T00:25:11,174 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:25:11,179 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:25:11,179 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-12-07T00:25:11,181 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=152, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:25:11,181 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 152 2024-12-07T00:25:11,182 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=152, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=152, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 275 msec 2024-12-07T00:25:11,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=152 2024-12-07T00:25:11,211 INFO [Time-limited test {}] 
client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot, procId: 152 completed 2024-12-07T00:25:11,722 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0006_000001 (auth:SIMPLE) from 127.0.0.1:41932 2024-12-07T00:25:11,859 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_3/usercache/jenkins/appcache/application_1733530972728_0006/container_1733530972728_0006_01_000001/sysfs] 2024-12-07T00:25:12,674 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:25:12,937 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:25:14,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-07T00:25:14,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-07T00:25:14,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-07T00:25:14,378 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-12-07T00:25:14,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-12-07T00:25:14,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-12-07T00:25:19,880 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:25:21,222 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531121221 2024-12-07T00:25:21,222 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46157, tgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531121221, rawTgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531121221, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 
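The export itself is driven through the ExportSnapshot tool via ToolRunner, as the stack trace in the following entries confirms. A sketch of the equivalent standalone invocation, using the destination path reported above; -snapshot and -copy-to are the tool's documented options.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Same Tool the test drives through ToolRunner (see the stack trace
    // below); the destination is the export path reported in the log.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snapshot-testExportExpiredSnapshot",
        "-copy-to", "hdfs://localhost:46157/user/jenkins/test-data/"
            + "2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531121221"
    });
    System.exit(rc);
  }
}
```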
2024-12-07T00:25:21,256 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:21,256 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531121221, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531121221/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-12-07T00:25:21,258 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:25:21,259 ERROR [Time-limited test {}] util.AbstractHBaseTool(153): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:948) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1093) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:315) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
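The SnapshotTTLExpiredException above is the expected outcome: the snapshot finished at 00:25:11.182 with a 10-second TTL, and the export's verification step ran at 00:25:21.258, just over ten seconds later. A small sketch of the expiry arithmetic behind that check (an illustration of the rule, not the exact SnapshotDescriptionUtils code), with the two timestamps taken from the log in epoch milliseconds.

```java
/**
 * Paraphrase of the TTL check that makes the export above fail; an
 * illustrative sketch, not the library's exact implementation.
 */
public final class SnapshotTtlCheck {
  private SnapshotTtlCheck() {}

  /** ttlSeconds <= 0 means the snapshot never expires. */
  public static boolean isExpired(long ttlSeconds, long creationTimeMs, long nowMs) {
    return ttlSeconds > 0 && creationTimeMs + ttlSeconds * 1000L < nowMs;
  }

  public static void main(String[] args) {
    // From the log: snapshot completed ~00:25:11.182, export verification
    // ran ~00:25:21.258 -- roughly 10.1 s later, past the 10 s TTL.
    long creation = 1733531111182L;
    long verify   = 1733531121258L;
    System.out.println(isExpired(10, creation, verify)); // true -> SnapshotTTLExpiredException
  }
}
```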
2024-12-07T00:25:21,261 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,261 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=155, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-07T00:25:21,264 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531121264"}]},"ts":"1733531121264"} 2024-12-07T00:25:21,265 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-12-07T00:25:21,267 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-12-07T00:25:21,268 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-12-07T00:25:21,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f7a4e93ffe965017ee70266453a2811f, UNASSIGN}, {pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1eccefe77eaed05be3d668802482d57a, UNASSIGN}] 2024-12-07T00:25:21,270 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=158, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1eccefe77eaed05be3d668802482d57a, UNASSIGN 2024-12-07T00:25:21,270 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=157, ppid=156, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f7a4e93ffe965017ee70266453a2811f, UNASSIGN 2024-12-07T00:25:21,271 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=f7a4e93ffe965017ee70266453a2811f, regionState=CLOSING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:21,271 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=1eccefe77eaed05be3d668802482d57a, regionState=CLOSING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:21,272 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:25:21,272 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=159, ppid=158, state=RUNNABLE; CloseRegionProcedure 1eccefe77eaed05be3d668802482d57a, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:25:21,272 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: 
false 2024-12-07T00:25:21,272 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=160, ppid=157, state=RUNNABLE; CloseRegionProcedure f7a4e93ffe965017ee70266453a2811f, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:25:21,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-07T00:25:21,424 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:21,424 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:21,424 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(124): Close 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:21,424 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(124): Close f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:21,424 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:25:21,424 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:25:21,425 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1681): Closing 1eccefe77eaed05be3d668802482d57a, disabling compactions & flushes 2024-12-07T00:25:21,425 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:21,425 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1681): Closing f7a4e93ffe965017ee70266453a2811f, disabling compactions & flushes 2024-12-07T00:25:21,425 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:21,425 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. after waiting 0 ms 2024-12-07T00:25:21,425 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 2024-12-07T00:25:21,425 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1703): Closing region testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:21,425 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 
2024-12-07T00:25:21,425 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. after waiting 0 ms 2024-12-07T00:25:21,425 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:21,429 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:25:21,429 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:25:21,429 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:25:21,429 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:25:21,430 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f. 2024-12-07T00:25:21,430 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1922): Closed testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a. 
2024-12-07T00:25:21,430 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] regionserver.HRegion(1635): Region close journal for 1eccefe77eaed05be3d668802482d57a: 2024-12-07T00:25:21,430 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] regionserver.HRegion(1635): Region close journal for f7a4e93ffe965017ee70266453a2811f: 2024-12-07T00:25:21,431 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=159}] handler.UnassignRegionHandler(170): Closed 1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:21,431 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=158 updating hbase:meta row=1eccefe77eaed05be3d668802482d57a, regionState=CLOSED 2024-12-07T00:25:21,432 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=160}] handler.UnassignRegionHandler(170): Closed f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:21,433 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=157 updating hbase:meta row=f7a4e93ffe965017ee70266453a2811f, regionState=CLOSED 2024-12-07T00:25:21,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=159, resume processing ppid=158 2024-12-07T00:25:21,434 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=159, ppid=158, state=SUCCESS; CloseRegionProcedure 1eccefe77eaed05be3d668802482d57a, server=6e6711bf2d40,43281,1733530964846 in 161 msec 2024-12-07T00:25:21,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=160, resume processing ppid=157 2024-12-07T00:25:21,435 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=158, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=1eccefe77eaed05be3d668802482d57a, UNASSIGN in 165 msec 2024-12-07T00:25:21,435 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=160, ppid=157, state=SUCCESS; CloseRegionProcedure f7a4e93ffe965017ee70266453a2811f, server=6e6711bf2d40,33095,1733530964767 in 162 msec 2024-12-07T00:25:21,436 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=157, resume processing ppid=156 2024-12-07T00:25:21,436 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=157, ppid=156, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=f7a4e93ffe965017ee70266453a2811f, UNASSIGN in 166 msec 2024-12-07T00:25:21,438 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=156, resume processing ppid=155 2024-12-07T00:25:21,438 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=156, ppid=155, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 169 msec 2024-12-07T00:25:21,439 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531121439"}]},"ts":"1733531121439"} 2024-12-07T00:25:21,440 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-12-07T00:25:21,441 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-12-07T00:25:21,443 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=155, state=SUCCESS; DisableTableProcedure 
table=testtb-testExportExpiredSnapshot in 181 msec 2024-12-07T00:25:21,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=155 2024-12-07T00:25:21,565 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 155 completed 2024-12-07T00:25:21,566 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,567 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,567 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=161, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,568 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,568 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=161, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,569 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,571 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:21,571 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:21,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 
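Having observed the expected failure, the test tears down: the table is disabled and deleted through the procedures recorded above, and the three snapshots are dropped in the trailing entries. A sketch of the equivalent Admin calls, with table and snapshot names taken from the log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestTeardown {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Mirrors the DISABLE and DELETE table procedures logged above;
      // a table must be disabled before it can be deleted.
      admin.disableTable(table);
      admin.deleteTable(table);

      // Mirrors the "delete name: ..." snapshot requests that follow.
      admin.deleteSnapshot("emptySnaptb0-testExportExpiredSnapshot");
      admin.deleteSnapshot("snapshot-testExportExpiredSnapshot");
      admin.deleteSnapshot("snaptb0-testExportExpiredSnapshot");
    }
  }
}
```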
2024-12-07T00:25:21,573 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-12-07T00:25:21,574 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/recovered.edits] 2024-12-07T00:25:21,574 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/recovered.edits] 2024-12-07T00:25:21,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:21,574 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:21,575 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-07T00:25:21,575 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T00:25:21,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:21,575 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:21,575 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data null 2024-12-07T00:25:21,575 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T00:25:21,575 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-07T00:25:21,576 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:21,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:21,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:21,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:21,578 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/cf/ff61afc9c400429aa9f9d80dca814fb7 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/cf/ff61afc9c400429aa9f9d80dca814fb7 2024-12-07T00:25:21,581 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a/recovered.edits/9.seqid 2024-12-07T00:25:21,582 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/1eccefe77eaed05be3d668802482d57a 2024-12-07T00:25:21,582 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/cf/654acdb3fad64742b543dbc11ffbc036 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/cf/654acdb3fad64742b543dbc11ffbc036 2024-12-07T00:25:21,584 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f/recovered.edits/9.seqid 2024-12-07T00:25:21,584 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportExpiredSnapshot/f7a4e93ffe965017ee70266453a2811f 2024-12-07T00:25:21,585 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-12-07T00:25:21,586 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=161, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,588 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-12-07T00:25:21,590 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-12-07T00:25:21,591 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=161, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,591 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 2024-12-07T00:25:21,591 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531121591"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:21,591 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531121591"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:21,593 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:25:21,593 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f7a4e93ffe965017ee70266453a2811f, NAME => 'testtb-testExportExpiredSnapshot,,1733531108113.f7a4e93ffe965017ee70266453a2811f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 1eccefe77eaed05be3d668802482d57a, NAME => 'testtb-testExportExpiredSnapshot,1,1733531108113.1eccefe77eaed05be3d668802482d57a.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:25:21,593 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 
2024-12-07T00:25:21,593 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531121593"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:21,595 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportExpiredSnapshot state from META 2024-12-07T00:25:21,597 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=161, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-12-07T00:25:21,597 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=161, state=SUCCESS; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 31 msec 2024-12-07T00:25:21,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=161 2024-12-07T00:25:21,677 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot, procId: 161 completed 2024-12-07T00:25:21,685 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" 2024-12-07T00:25:21,687 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-12-07T00:25:21,688 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" 2024-12-07T00:25:21,690 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-12-07T00:25:21,691 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" 2024-12-07T00:25:21,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-12-07T00:25:21,715 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportExpiredSnapshot Thread=792 (was 795), OpenFileDescriptor=781 (was 790), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=673 (was 717), ProcessCount=11 (was 17), AvailableMemoryMB=3742 (was 2888) - AvailableMemoryMB LEAK? 
- 2024-12-07T00:25:21,715 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-07T00:25:21,733 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=792, OpenFileDescriptor=781, MaxFileDescriptor=1048576, SystemLoadAverage=673, ProcessCount=11, AvailableMemoryMB=3742 2024-12-07T00:25:21,733 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=792 is superior to 500 2024-12-07T00:25:21,735 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:25:21,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T00:25:21,737 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:25:21,737 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:21,737 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 162 2024-12-07T00:25:21,737 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:25:21,738 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T00:25:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742186_1362 (size=412) 2024-12-07T00:25:21,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742186_1362 (size=412) 2024-12-07T00:25:21,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742186_1362 (size=412) 2024-12-07T00:25:21,746 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => 38b9299c00c909a09d0e514bfec0f342, NAME => 'testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 
'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:21,747 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 06847a3269e13358b0a6ef24df043fda, NAME => 'testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742188_1364 (size=73) 2024-12-07T00:25:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742187_1363 (size=73) 2024-12-07T00:25:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742188_1364 (size=73) 2024-12-07T00:25:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742187_1363 (size=73) 2024-12-07T00:25:21,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742188_1364 (size=73) 2024-12-07T00:25:21,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742187_1363 (size=73) 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1681): Closing 06847a3269e13358b0a6ef24df043fda, disabling compactions & flushes 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:21,755 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 
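The CreateTableProcedure above lays out two regions split at row key '1', each with a single 'cf' family tracked by the default store file tracker. A client request producing an equivalent table would look roughly like the fragment below (HBase 2.x API, assumed to live in a helper class with an open Admin handle; attribute values are taken from the descriptor printed in the log, everything else left at defaults).

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

static void createEmptyExportTable(Admin admin) throws IOException {
  TableDescriptor desc = TableDescriptorBuilder
      .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
      .setRegionReplication(1)                     // REGION_REPLICATION => '1'
      .setColumnFamily(ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)                       // VERSIONS => '1'
          .setBlocksize(64 * 1024)                 // BLOCKSIZE => '65536'
          .build())
      .build();
  // One explicit split key yields the two regions seen in the log:
  // ('' -> '1') and ('1' -> '').
  byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
  admin.createTable(desc, splitKeys);
}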
2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. after waiting 0 ms 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1681): Closing 38b9299c00c909a09d0e514bfec0f342, disabling compactions & flushes 2024-12-07T00:25:21,755 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1635): Region close journal for 06847a3269e13358b0a6ef24df043fda: 2024-12-07T00:25:21,755 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. after waiting 0 ms 2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:21,755 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 
2024-12-07T00:25:21,755 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1635): Region close journal for 38b9299c00c909a09d0e514bfec0f342: 2024-12-07T00:25:21,757 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:25:21,757 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733531121757"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531121757"}]},"ts":"1733531121757"} 2024-12-07T00:25:21,757 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1733531121757"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531121757"}]},"ts":"1733531121757"} 2024-12-07T00:25:21,759 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:25:21,760 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:25:21,760 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531121760"}]},"ts":"1733531121760"} 2024-12-07T00:25:21,761 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-12-07T00:25:21,765 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:25:21,766 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:25:21,766 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:25:21,766 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:25:21,766 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:25:21,766 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:25:21,766 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:25:21,766 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:25:21,767 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=38b9299c00c909a09d0e514bfec0f342, ASSIGN}, {pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=06847a3269e13358b0a6ef24df043fda, ASSIGN}] 2024-12-07T00:25:21,768 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=164, ppid=162, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=06847a3269e13358b0a6ef24df043fda, ASSIGN 2024-12-07T00:25:21,768 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=38b9299c00c909a09d0e514bfec0f342, ASSIGN 2024-12-07T00:25:21,769 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(264): Starting pid=164, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=06847a3269e13358b0a6ef24df043fda, ASSIGN; state=OFFLINE, location=6e6711bf2d40,36059,1733530964705; forceNewPlan=false, retain=false 2024-12-07T00:25:21,769 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=163, ppid=162, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=38b9299c00c909a09d0e514bfec0f342, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:25:21,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T00:25:21,919 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:25:21,919 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=38b9299c00c909a09d0e514bfec0f342, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:21,919 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=06847a3269e13358b0a6ef24df043fda, regionState=OPENING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:21,921 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=165, ppid=163, state=RUNNABLE; OpenRegionProcedure 38b9299c00c909a09d0e514bfec0f342, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:25:21,922 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=166, ppid=164, state=RUNNABLE; OpenRegionProcedure 06847a3269e13358b0a6ef24df043fda, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:25:22,040 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T00:25:22,072 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:22,073 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:22,075 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 
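The recurring MasterRpcServices(1305) "Checking to see if procedure is done pid=162" entries are the client side of that create: HBaseAdmin wraps the procedure in a TableFuture and polls the master until it reports completion. The same mechanism is exposed directly through createTableAsync; a sketch under the same assumptions as the create-table fragment above:

import java.util.concurrent.Future;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.TableDescriptor;

static void createAndWait(Admin admin, TableDescriptor desc, byte[][] splitKeys) throws Exception {
  // Submits the CreateTableProcedure and returns immediately; the Future
  // then polls the master (the "procedure is done" checks in this log)
  // until the procedure finishes.
  Future<Void> pending = admin.createTableAsync(desc, splitKeys);
  pending.get();
}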
2024-12-07T00:25:22,076 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7285): Opening region: {ENCODED => 38b9299c00c909a09d0e514bfec0f342, NAME => 'testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:25:22,076 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. service=AccessControlService 2024-12-07T00:25:22,076 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(135): Open testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:22,076 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7285): Opening region: {ENCODED => 06847a3269e13358b0a6ef24df043fda, NAME => 'testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:25:22,076 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:25:22,076 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,076 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:22,076 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. service=AccessControlService 2024-12-07T00:25:22,076 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7327): checking encryption for 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,076 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(7330): checking classloading for 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,077 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
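Per the AssignRegionHandler entries, the two new regions open on different region servers (ports 33095 and 36059). After assignment a client can confirm the placement itself; a minimal sketch, assuming an open Connection named connection:

import java.io.IOException;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;

static void printRegionLocations(Connection connection) throws IOException {
  TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
  try (RegionLocator locator = connection.getRegionLocator(table)) {
    // Each HRegionLocation pairs a region (start/end key) with the server
    // hosting it, e.g. 6e6711bf2d40,33095,... in the entries above.
    for (HRegionLocation location : locator.getAllRegionLocations()) {
      System.out.println(location.getRegion().getRegionNameAsString()
          + " -> " + location.getServerName());
    }
  }
}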
2024-12-07T00:25:22,077 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,077 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(894): Instantiated testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:22,077 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7327): checking encryption for 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,077 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(7330): checking classloading for 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,078 INFO [StoreOpener-38b9299c00c909a09d0e514bfec0f342-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,078 INFO [StoreOpener-06847a3269e13358b0a6ef24df043fda-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,079 INFO [StoreOpener-38b9299c00c909a09d0e514bfec0f342-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 38b9299c00c909a09d0e514bfec0f342 columnFamilyName cf 2024-12-07T00:25:22,079 DEBUG [StoreOpener-38b9299c00c909a09d0e514bfec0f342-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:22,079 INFO [StoreOpener-06847a3269e13358b0a6ef24df043fda-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
06847a3269e13358b0a6ef24df043fda columnFamilyName cf 2024-12-07T00:25:22,079 INFO [StoreOpener-38b9299c00c909a09d0e514bfec0f342-1 {}] regionserver.HStore(327): Store=38b9299c00c909a09d0e514bfec0f342/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:25:22,080 DEBUG [StoreOpener-06847a3269e13358b0a6ef24df043fda-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:22,080 INFO [StoreOpener-06847a3269e13358b0a6ef24df043fda-1 {}] regionserver.HStore(327): Store=06847a3269e13358b0a6ef24df043fda/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:25:22,080 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,081 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,081 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,081 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,083 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1085): writing seq id for 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,083 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1085): writing seq id for 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,084 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:25:22,085 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1102): Opened 38b9299c00c909a09d0e514bfec0f342; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75139248, jitterRate=0.11966204643249512}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:25:22,085 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 
{event_type=M_RS_OPEN_REGION, pid=166}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:25:22,085 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegion(1001): Region open journal for 38b9299c00c909a09d0e514bfec0f342: 2024-12-07T00:25:22,086 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1102): Opened 06847a3269e13358b0a6ef24df043fda; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68057434, jitterRate=0.014134794473648071}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:25:22,086 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegion(1001): Region open journal for 06847a3269e13358b0a6ef24df043fda: 2024-12-07T00:25:22,086 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342., pid=165, masterSystemTime=1733531122072 2024-12-07T00:25:22,086 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda., pid=166, masterSystemTime=1733531122073 2024-12-07T00:25:22,087 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:22,087 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=165}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:22,088 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=163 updating hbase:meta row=38b9299c00c909a09d0e514bfec0f342, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:22,088 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:22,088 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=166}] handler.AssignRegionHandler(164): Opened testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 
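Because the AccessController coprocessor is loaded, the create's post-operation stores the creator's permissions ("jenkins: RWXCA") in the acl table and pushes them to ZooKeeper, which is what the PermissionStorage and ZKPermissionWatcher entries just below record. Those permissions can be read back through the public AccessControlClient helper; a hedged sketch, again assuming an open Connection named connection:

import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.UserPermission;

static void dumpTablePermissions(Connection connection) throws Throwable {
  // Returns one UserPermission per stored (user, actions) entry for tables
  // matching the regex, e.g. jenkins with RWXCA on this test table.
  for (UserPermission permission :
      AccessControlClient.getUserPermissions(connection, "testtb-testEmptyExportFileSystemState")) {
    System.out.println(permission);
  }
}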
2024-12-07T00:25:22,088 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=164 updating hbase:meta row=06847a3269e13358b0a6ef24df043fda, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:22,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=165, resume processing ppid=163 2024-12-07T00:25:22,090 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=165, ppid=163, state=SUCCESS; OpenRegionProcedure 38b9299c00c909a09d0e514bfec0f342, server=6e6711bf2d40,33095,1733530964767 in 168 msec 2024-12-07T00:25:22,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=166, resume processing ppid=164 2024-12-07T00:25:22,091 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=163, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=38b9299c00c909a09d0e514bfec0f342, ASSIGN in 323 msec 2024-12-07T00:25:22,091 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=166, ppid=164, state=SUCCESS; OpenRegionProcedure 06847a3269e13358b0a6ef24df043fda, server=6e6711bf2d40,36059,1733530964705 in 168 msec 2024-12-07T00:25:22,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=164, resume processing ppid=162 2024-12-07T00:25:22,092 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=164, ppid=162, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=06847a3269e13358b0a6ef24df043fda, ASSIGN in 324 msec 2024-12-07T00:25:22,093 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:25:22,093 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531122093"}]},"ts":"1733531122093"} 2024-12-07T00:25:22,094 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-12-07T00:25:22,096 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=162, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:25:22,096 DEBUG [PEWorker-4 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-12-07T00:25:22,098 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T00:25:22,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:22,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:22,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:22,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:22,106 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:22,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:22,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:22,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:22,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:22,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:22,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:22,107 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:22,108 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=162, state=SUCCESS; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 371 msec 2024-12-07T00:25:22,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=162 2024-12-07T00:25:22,341 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 162 completed 2024-12-07T00:25:22,341 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testEmptyExportFileSystemState get assigned. 
Timeout = 60000ms 2024-12-07T00:25:22,341 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:22,345 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testEmptyExportFileSystemState assigned to meta. Checking AM states. 2024-12-07T00:25:22,345 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:22,345 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testEmptyExportFileSystemState assigned. 2024-12-07T00:25:22,347 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T00:25:22,347 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531122347 (current time:1733531122347). 2024-12-07T00:25:22,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:25:22,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-07T00:25:22,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:25:22,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0cfe1490 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@29763821 2024-12-07T00:25:22,353 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9929898, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:22,354 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:22,355 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56586, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:22,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0cfe1490 to 127.0.0.1:60354 2024-12-07T00:25:22,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:22,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x75d11601 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6dfe9faa 2024-12-07T00:25:22,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@70358cd5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:22,361 DEBUG [hconnection-0x473c2d69-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:22,363 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56596, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:22,364 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:22,365 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55950, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:22,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x75d11601 to 127.0.0.1:60354 2024-12-07T00:25:22,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:22,366 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T00:25:22,367 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:25:22,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=167, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T00:25:22,368 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-07T00:25:22,368 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:25:22,369 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-07T00:25:22,369 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:25:22,371 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:25:22,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742189_1365 (size=185) 2024-12-07T00:25:22,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742189_1365 (size=185) 2024-12-07T00:25:22,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742189_1365 (size=185) 2024-12-07T00:25:22,383 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:25:22,384 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 38b9299c00c909a09d0e514bfec0f342}, {pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 06847a3269e13358b0a6ef24df043fda}] 2024-12-07T00:25:22,385 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,385 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-07T00:25:22,536 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:22,536 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:22,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=169 2024-12-07T00:25:22,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=168 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 
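The snapshot request above ({ ss=emptySnaptb0-testEmptyExportFileSystemState ... type=FLUSH ttl=0 }) corresponds to a single Admin call; the master then drives the SnapshotProcedure and one SnapshotRegionProcedure per region, as the surrounding entries show. An illustrative sketch with an assumed Admin handle:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

static void takeEmptySnapshot(Admin admin) throws IOException {
  // FLUSH-type snapshot: online regions are flushed (a no-op here, since the
  // table is still empty) and their store files are referenced in the manifest.
  admin.snapshot("emptySnaptb0-testEmptyExportFileSystemState",
      TableName.valueOf("testtb-testEmptyExportFileSystemState"),
      SnapshotType.FLUSH);
}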
2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.HRegion(2538): Flush status journal for 06847a3269e13358b0a6ef24df043fda: 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.HRegion(2538): Flush status journal for 38b9299c00c909a09d0e514bfec0f342: 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:25:22,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:25:22,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742191_1367 (size=76) 2024-12-07T00:25:22,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742191_1367 (size=76) 2024-12-07T00:25:22,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742191_1367 (size=76) 2024-12-07T00:25:22,550 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 
2024-12-07T00:25:22,550 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=169}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=169 2024-12-07T00:25:22,550 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=169 2024-12-07T00:25:22,550 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,551 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=169, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,552 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=169, ppid=167, state=SUCCESS; SnapshotRegionProcedure 06847a3269e13358b0a6ef24df043fda in 168 msec 2024-12-07T00:25:22,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742190_1366 (size=76) 2024-12-07T00:25:22,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742190_1366 (size=76) 2024-12-07T00:25:22,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742190_1366 (size=76) 2024-12-07T00:25:22,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 
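Once the remaining SnapshotRegionProcedure and the consolidation and verification steps below finish, the snapshot is moved from .hbase-snapshot/.tmp into .hbase-snapshot and becomes visible to clients. A small sketch for confirming it from the client side (assumed Admin handle):

import java.io.IOException;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotDescription;

static void verifySnapshotExists(Admin admin) throws IOException {
  for (SnapshotDescription snapshot : admin.listSnapshots()) {
    if ("emptySnaptb0-testEmptyExportFileSystemState".equals(snapshot.getName())) {
      System.out.println("found " + snapshot.getName()
          + " for table " + snapshot.getTableNameAsString());
    }
  }
}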
2024-12-07T00:25:22,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=168}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=168 2024-12-07T00:25:22,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=168 2024-12-07T00:25:22,555 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,555 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=168, ppid=167, state=RUNNABLE; SnapshotRegionProcedure 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=168, resume processing ppid=167 2024-12-07T00:25:22,557 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=168, ppid=167, state=SUCCESS; SnapshotRegionProcedure 38b9299c00c909a09d0e514bfec0f342 in 173 msec 2024-12-07T00:25:22,557 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:25:22,558 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:25:22,558 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:25:22,558 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,559 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742192_1368 (size=567) 2024-12-07T00:25:22,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742192_1368 (size=567) 2024-12-07T00:25:22,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742192_1368 (size=567) 2024-12-07T00:25:22,571 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute 
state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:25:22,575 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:25:22,576 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,577 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=167, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:25:22,577 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 167 2024-12-07T00:25:22,579 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=167, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=167, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 210 msec 2024-12-07T00:25:22,670 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=167 2024-12-07T00:25:22,671 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 167 completed 2024-12-07T00:25:22,677 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33095 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:25:22,678 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36059 {}] regionserver.HRegion(8254): writing data to region testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:25:22,681 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-12-07T00:25:22,681 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 
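The "writing data to region ... with WAL disabled" messages above are what HRegion prints when a client applies mutations with durability SKIP_WAL, which appears to be how the test utility loads its rows here. A minimal sketch of such a write, assuming an open Connection named connection; the row key, qualifier, and value are made up for illustration:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

static void putWithoutWal(Connection connection) throws IOException {
  TableName tableName = TableName.valueOf("testtb-testEmptyExportFileSystemState");
  try (Table table = connection.getTable(tableName)) {
    Put put = new Put(Bytes.toBytes("row-0"));                          // hypothetical row key
    put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    put.setDurability(Durability.SKIP_WAL);                             // skips the WAL, hence the message
    table.put(put);
  }
}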
2024-12-07T00:25:22,681 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:22,693 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T00:25:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531122693 (current time:1733531122693). 2024-12-07T00:25:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:25:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-12-07T00:25:22,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:25:22,694 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x7f35e226 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@59c4dfcc 2024-12-07T00:25:22,700 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@610862b7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:22,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:22,702 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54886, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:22,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x7f35e226 to 127.0.0.1:60354 2024-12-07T00:25:22,704 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:22,705 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x463cac9a to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@39f9312e 2024-12-07T00:25:22,710 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bc6f17c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:22,711 DEBUG [hconnection-0x4f9f1f53-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:22,712 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 
172.17.0.2:54890, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:22,714 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:22,715 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34402, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:22,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x463cac9a to 127.0.0.1:60354 2024-12-07T00:25:22,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:22,716 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-12-07T00:25:22,717 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:25:22,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=170, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-12-07T00:25:22,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-07T00:25:22,718 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:25:22,719 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-07T00:25:22,719 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:25:22,721 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:25:22,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742193_1369 (size=180) 2024-12-07T00:25:22,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742193_1369 (size=180) 2024-12-07T00:25:22,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is 
added to blk_1073742193_1369 (size=180) 2024-12-07T00:25:22,729 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:25:22,729 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 38b9299c00c909a09d0e514bfec0f342}, {pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 06847a3269e13358b0a6ef24df043fda}] 2024-12-07T00:25:22,730 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,730 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,820 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-07T00:25:22,880 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:22,880 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:22,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=171 2024-12-07T00:25:22,881 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36059 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=172 2024-12-07T00:25:22,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:22,881 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 
2024-12-07T00:25:22,881 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2837): Flushing 38b9299c00c909a09d0e514bfec0f342 1/1 column families, dataSize=333 B heapSize=976 B 2024-12-07T00:25:22,882 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2837): Flushing 06847a3269e13358b0a6ef24df043fda 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-12-07T00:25:22,896 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-12-07T00:25:22,899 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/.tmp/cf/82756c21d7d64ee1910987e2dc710ed8 is 71, key is 2a6b97e5fe555e4317f6aa4ca1594f68/cf:q/1733531122678/Put/seqid=0 2024-12-07T00:25:22,904 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/.tmp/cf/9812eff8ba1043018d80b1f389ae254b is 71, key is 02acce0e085b1e51b1160a9f86565550/cf:q/1733531122677/Put/seqid=0 2024-12-07T00:25:22,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742194_1370 (size=8190) 2024-12-07T00:25:22,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742194_1370 (size=8190) 2024-12-07T00:25:22,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742194_1370 (size=8190) 2024-12-07T00:25:22,918 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.93 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/.tmp/cf/82756c21d7d64ee1910987e2dc710ed8 2024-12-07T00:25:22,923 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/.tmp/cf/82756c21d7d64ee1910987e2dc710ed8 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/cf/82756c21d7d64ee1910987e2dc710ed8 2024-12-07T00:25:22,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742195_1371 (size=5424) 2024-12-07T00:25:22,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742195_1371 (size=5424) 2024-12-07T00:25:22,926 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742195_1371 (size=5424) 2024-12-07T00:25:22,927 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=333 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/.tmp/cf/9812eff8ba1043018d80b1f389ae254b 2024-12-07T00:25:22,929 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/cf/82756c21d7d64ee1910987e2dc710ed8, entries=45, sequenceid=6, filesize=8.0 K 2024-12-07T00:25:22,930 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(3040): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 06847a3269e13358b0a6ef24df043fda in 49ms, sequenceid=6, compaction requested=false 2024-12-07T00:25:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.HRegion(2538): Flush status journal for 06847a3269e13358b0a6ef24df043fda: 2024-12-07T00:25:22,930 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-07T00:25:22,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:22,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/cf/82756c21d7d64ee1910987e2dc710ed8] hfiles 2024-12-07T00:25:22,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/cf/82756c21d7d64ee1910987e2dc710ed8 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,932 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/.tmp/cf/9812eff8ba1043018d80b1f389ae254b as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/cf/9812eff8ba1043018d80b1f389ae254b 2024-12-07T00:25:22,937 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/cf/9812eff8ba1043018d80b1f389ae254b, entries=5, sequenceid=6, filesize=5.3 K 2024-12-07T00:25:22,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742196_1372 (size=115) 2024-12-07T00:25:22,938 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(3040): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 38b9299c00c909a09d0e514bfec0f342 in 57ms, sequenceid=6, compaction requested=false 2024-12-07T00:25:22,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.HRegion(2538): Flush status journal for 38b9299c00c909a09d0e514bfec0f342: 2024-12-07T00:25:22,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. for snaptb0-testEmptyExportFileSystemState completed. 2024-12-07T00:25:22,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:22,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/cf/9812eff8ba1043018d80b1f389ae254b] hfiles 2024-12-07T00:25:22,938 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/cf/9812eff8ba1043018d80b1f389ae254b for snapshot=snaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742196_1372 (size=115) 2024-12-07T00:25:22,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742196_1372 (size=115) 2024-12-07T00:25:22,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:22,939 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=172}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=172 2024-12-07T00:25:22,939 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=172 2024-12-07T00:25:22,939 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,939 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=172, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:22,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=172, ppid=170, state=SUCCESS; SnapshotRegionProcedure 06847a3269e13358b0a6ef24df043fda in 211 msec 2024-12-07T00:25:22,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742197_1373 (size=115) 2024-12-07T00:25:22,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742197_1373 (size=115) 2024-12-07T00:25:22,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742197_1373 (size=115) 2024-12-07T00:25:22,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 
2024-12-07T00:25:22,946 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=171}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=171 2024-12-07T00:25:22,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=171 2024-12-07T00:25:22,946 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,946 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=171, ppid=170, state=RUNNABLE; SnapshotRegionProcedure 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:22,948 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=171, resume processing ppid=170 2024-12-07T00:25:22,948 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=171, ppid=170, state=SUCCESS; SnapshotRegionProcedure 38b9299c00c909a09d0e514bfec0f342 in 218 msec 2024-12-07T00:25:22,948 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:25:22,949 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:25:22,949 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:25:22,949 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,950 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742198_1374 (size=645) 2024-12-07T00:25:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742198_1374 (size=645) 2024-12-07T00:25:22,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742198_1374 (size=645) 2024-12-07T00:25:22,964 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 
2024-12-07T00:25:22,968 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:25:22,968 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:22,970 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=170, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:25:22,970 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 170 2024-12-07T00:25:22,971 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=170, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=170, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 253 msec 2024-12-07T00:25:23,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=170 2024-12-07T00:25:23,021 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 170 completed 2024-12-07T00:25:23,021 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021 2024-12-07T00:25:23,021 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46157, tgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021, rawTgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:23,051 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:23,051 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:23,053 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:25:23,057 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:23,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742199_1375 (size=185) 2024-12-07T00:25:23,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742200_1376 (size=567) 2024-12-07T00:25:23,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742200_1376 (size=567) 2024-12-07T00:25:23,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742199_1375 (size=185) 2024-12-07T00:25:23,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742199_1375 (size=185) 2024-12-07T00:25:23,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742200_1376 (size=567) 2024-12-07T00:25:23,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:23,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:23,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:23,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-15539399104708587816.jar 2024-12-07T00:25:24,073 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,074 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-4964167803783897701.jar 2024-12-07T00:25:24,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,150 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:24,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:25:24,151 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:25:24,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:25:24,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:25:24,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:25:24,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:25:24,152 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:25:24,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:25:24,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:25:24,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:25:24,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:25:24,153 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:25:24,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:25:24,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:25:24,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:25:24,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:25:24,154 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:25:24,154 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:25:24,155 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:25:24,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742201_1377 (size=127628) 2024-12-07T00:25:24,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742201_1377 (size=127628) 2024-12-07T00:25:24,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742201_1377 (size=127628) 2024-12-07T00:25:24,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742202_1378 (size=2172101) 2024-12-07T00:25:24,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742202_1378 (size=2172101) 2024-12-07T00:25:24,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742202_1378 (size=2172101) 2024-12-07T00:25:24,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742203_1379 (size=6350154) 2024-12-07T00:25:24,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742203_1379 (size=6350154) 2024-12-07T00:25:24,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742203_1379 (size=6350154) 2024-12-07T00:25:24,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742204_1380 (size=213228) 2024-12-07T00:25:24,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742204_1380 (size=213228) 2024-12-07T00:25:24,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742204_1380 (size=213228) 2024-12-07T00:25:24,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742205_1381 (size=1877034) 2024-12-07T00:25:24,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742205_1381 (size=1877034) 2024-12-07T00:25:24,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to 
blk_1073742205_1381 (size=1877034) 2024-12-07T00:25:24,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742206_1382 (size=533455) 2024-12-07T00:25:24,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742206_1382 (size=533455) 2024-12-07T00:25:24,305 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742206_1382 (size=533455) 2024-12-07T00:25:24,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742207_1383 (size=7280644) 2024-12-07T00:25:24,334 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742207_1383 (size=7280644) 2024-12-07T00:25:24,335 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742207_1383 (size=7280644) 2024-12-07T00:25:24,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742208_1384 (size=4188619) 2024-12-07T00:25:24,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742208_1384 (size=4188619) 2024-12-07T00:25:24,359 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742208_1384 (size=4188619) 2024-12-07T00:25:24,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742209_1385 (size=20406) 2024-12-07T00:25:24,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742209_1385 (size=20406) 2024-12-07T00:25:24,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742209_1385 (size=20406) 2024-12-07T00:25:24,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-07T00:25:24,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-12-07T00:25:24,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-12-07T00:25:24,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742210_1386 (size=75495) 2024-12-07T00:25:24,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742210_1386 (size=75495) 2024-12-07T00:25:24,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742210_1386 (size=75495) 2024-12-07T00:25:24,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to 
blk_1073742211_1387 (size=451756) 2024-12-07T00:25:24,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742211_1387 (size=451756) 2024-12-07T00:25:24,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742211_1387 (size=451756) 2024-12-07T00:25:24,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742212_1388 (size=45609) 2024-12-07T00:25:24,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742212_1388 (size=45609) 2024-12-07T00:25:24,402 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742212_1388 (size=45609) 2024-12-07T00:25:24,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742213_1389 (size=110084) 2024-12-07T00:25:24,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742213_1389 (size=110084) 2024-12-07T00:25:24,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742213_1389 (size=110084) 2024-12-07T00:25:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742214_1390 (size=1323991) 2024-12-07T00:25:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742214_1390 (size=1323991) 2024-12-07T00:25:24,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742214_1390 (size=1323991) 2024-12-07T00:25:24,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742215_1391 (size=23076) 2024-12-07T00:25:24,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742215_1391 (size=23076) 2024-12-07T00:25:24,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742215_1391 (size=23076) 2024-12-07T00:25:24,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742216_1392 (size=126803) 2024-12-07T00:25:24,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742216_1392 (size=126803) 2024-12-07T00:25:24,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742216_1392 (size=126803) 2024-12-07T00:25:24,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742217_1393 (size=322274) 2024-12-07T00:25:24,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742217_1393 (size=322274) 2024-12-07T00:25:24,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added 
to blk_1073742217_1393 (size=322274) 2024-12-07T00:25:24,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742218_1394 (size=1832290) 2024-12-07T00:25:24,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742218_1394 (size=1832290) 2024-12-07T00:25:24,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742218_1394 (size=1832290) 2024-12-07T00:25:24,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742219_1395 (size=30081) 2024-12-07T00:25:24,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742219_1395 (size=30081) 2024-12-07T00:25:24,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742219_1395 (size=30081) 2024-12-07T00:25:24,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742220_1396 (size=53616) 2024-12-07T00:25:24,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742220_1396 (size=53616) 2024-12-07T00:25:24,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742220_1396 (size=53616) 2024-12-07T00:25:24,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742221_1397 (size=29229) 2024-12-07T00:25:24,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742221_1397 (size=29229) 2024-12-07T00:25:24,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742221_1397 (size=29229) 2024-12-07T00:25:24,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742222_1398 (size=169089) 2024-12-07T00:25:24,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742222_1398 (size=169089) 2024-12-07T00:25:24,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742222_1398 (size=169089) 2024-12-07T00:25:24,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742223_1399 (size=5175431) 2024-12-07T00:25:24,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742223_1399 (size=5175431) 2024-12-07T00:25:24,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742223_1399 (size=5175431) 2024-12-07T00:25:24,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742224_1400 (size=136454) 2024-12-07T00:25:24,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is 
added to blk_1073742224_1400 (size=136454) 2024-12-07T00:25:24,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742224_1400 (size=136454) 2024-12-07T00:25:24,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742225_1401 (size=907851) 2024-12-07T00:25:24,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742225_1401 (size=907851) 2024-12-07T00:25:24,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742225_1401 (size=907851) 2024-12-07T00:25:24,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742226_1402 (size=3317408) 2024-12-07T00:25:24,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742226_1402 (size=3317408) 2024-12-07T00:25:24,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742226_1402 (size=3317408) 2024-12-07T00:25:24,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742227_1403 (size=503880) 2024-12-07T00:25:24,610 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742227_1403 (size=503880) 2024-12-07T00:25:24,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742227_1403 (size=503880) 2024-12-07T00:25:24,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742228_1404 (size=4695811) 2024-12-07T00:25:24,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742228_1404 (size=4695811) 2024-12-07T00:25:24,633 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742228_1404 (size=4695811) 2024-12-07T00:25:24,634 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-12-07T00:25:24,636 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-12-07T00:25:24,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742229_1405 (size=7) 2024-12-07T00:25:24,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742229_1405 (size=7) 2024-12-07T00:25:24,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742229_1405 (size=7) 2024-12-07T00:25:24,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742230_1406 (size=10) 2024-12-07T00:25:24,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742230_1406 (size=10) 2024-12-07T00:25:24,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742230_1406 (size=10) 2024-12-07T00:25:24,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742231_1407 (size=304788) 2024-12-07T00:25:24,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742231_1407 (size=304788) 2024-12-07T00:25:24,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742231_1407 (size=304788) 2024-12-07T00:25:24,692 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:25:24,692 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T00:25:24,729 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0007_000001 (auth:SIMPLE) from 127.0.0.1:46532 2024-12-07T00:25:26,932 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:25:30,055 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0007_000001 (auth:SIMPLE) from 127.0.0.1:46208 2024-12-07T00:25:30,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742232_1408 (size=350438) 2024-12-07T00:25:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742232_1408 (size=350438) 2024-12-07T00:25:30,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742232_1408 (size=350438) 2024-12-07T00:25:31,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742233_1409 (size=8568) 2024-12-07T00:25:31,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742233_1409 (size=8568) 2024-12-07T00:25:31,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742233_1409 (size=8568) 2024-12-07T00:25:31,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742234_1410 (size=460) 2024-12-07T00:25:31,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742234_1410 (size=460) 2024-12-07T00:25:31,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742234_1410 (size=460) 2024-12-07T00:25:31,210 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742235_1411 (size=8568) 2024-12-07T00:25:31,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742235_1411 (size=8568) 2024-12-07T00:25:31,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742235_1411 (size=8568) 2024-12-07T00:25:31,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742236_1412 (size=350438) 2024-12-07T00:25:31,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742236_1412 (size=350438) 2024-12-07T00:25:31,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742236_1412 (size=350438) 2024-12-07T00:25:32,809 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:25:32,809 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
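The export being finalized and verified here is driven by the ExportSnapshot MapReduce tool. A minimal sketch of launching the same export programmatically, assuming the tool's documented -snapshot/-copy-to/-mappers options; the destination URI below is illustrative and not taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Run the ExportSnapshot tool as a MapReduce job copying snapshot
        // metadata and referenced hfiles to the target filesystem.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to", "hdfs://backup-cluster:8020/hbase",  // illustrative destination
            "-mappers", "2"
        });
        System.exit(rc);
    }
}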
2024-12-07T00:25:32,828 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:32,828 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:25:32,828 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:25:32,829 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:32,829 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-07T00:25:32,829 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-07T00:25:32,829 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:32,830 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-12-07T00:25:32,830 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531123021/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-12-07T00:25:32,840 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testEmptyExportFileSystemState 2024-12-07T00:25:32,840 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-12-07T00:25:32,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=173, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T00:25:32,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T00:25:32,844 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531132843"}]},"ts":"1733531132843"} 2024-12-07T00:25:32,845 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated 
tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-12-07T00:25:32,847 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-12-07T00:25:32,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=174, ppid=173, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-12-07T00:25:32,849 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=38b9299c00c909a09d0e514bfec0f342, UNASSIGN}, {pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=06847a3269e13358b0a6ef24df043fda, UNASSIGN}] 2024-12-07T00:25:32,850 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=176, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=06847a3269e13358b0a6ef24df043fda, UNASSIGN 2024-12-07T00:25:32,851 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=175, ppid=174, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=38b9299c00c909a09d0e514bfec0f342, UNASSIGN 2024-12-07T00:25:32,851 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=38b9299c00c909a09d0e514bfec0f342, regionState=CLOSING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:32,851 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=06847a3269e13358b0a6ef24df043fda, regionState=CLOSING, regionLocation=6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:32,853 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:25:32,853 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=177, ppid=176, state=RUNNABLE; CloseRegionProcedure 06847a3269e13358b0a6ef24df043fda, server=6e6711bf2d40,36059,1733530964705}] 2024-12-07T00:25:32,854 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:25:32,854 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=178, ppid=175, state=RUNNABLE; CloseRegionProcedure 38b9299c00c909a09d0e514bfec0f342, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:25:32,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=173 2024-12-07T00:25:33,005 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,36059,1733530964705 2024-12-07T00:25:33,006 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(124): Close 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:33,006 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:25:33,006 DEBUG [RSProcedureDispatcher-pool-0 {}] 
master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:33,006 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1681): Closing 06847a3269e13358b0a6ef24df043fda, disabling compactions & flushes 2024-12-07T00:25:33,006 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:33,006 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:33,007 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. after waiting 0 ms 2024-12-07T00:25:33,007 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 2024-12-07T00:25:33,007 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(124): Close 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:33,007 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:25:33,007 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1681): Closing 38b9299c00c909a09d0e514bfec0f342, disabling compactions & flushes 2024-12-07T00:25:33,007 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1703): Closing region testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:33,007 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:33,007 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1791): Acquired close lock on testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. after waiting 0 ms 2024-12-07T00:25:33,007 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1801): Updates disabled for region testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 
2024-12-07T00:25:33,020 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:25:33,028 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:25:33,028 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342. 2024-12-07T00:25:33,028 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] regionserver.HRegion(1635): Region close journal for 38b9299c00c909a09d0e514bfec0f342: 2024-12-07T00:25:33,028 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:25:33,030 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:25:33,031 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1922): Closed testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda. 
2024-12-07T00:25:33,031 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] regionserver.HRegion(1635): Region close journal for 06847a3269e13358b0a6ef24df043fda: 2024-12-07T00:25:33,031 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=178}] handler.UnassignRegionHandler(170): Closed 38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:33,032 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=175 updating hbase:meta row=38b9299c00c909a09d0e514bfec0f342, regionState=CLOSED 2024-12-07T00:25:33,033 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=177}] handler.UnassignRegionHandler(170): Closed 06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:33,034 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=176 updating hbase:meta row=06847a3269e13358b0a6ef24df043fda, regionState=CLOSED 2024-12-07T00:25:33,037 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=178, resume processing ppid=175 2024-12-07T00:25:33,038 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=178, ppid=175, state=SUCCESS; CloseRegionProcedure 38b9299c00c909a09d0e514bfec0f342, server=6e6711bf2d40,33095,1733530964767 in 181 msec 2024-12-07T00:25:33,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=177, resume processing ppid=176 2024-12-07T00:25:33,039 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=177, ppid=176, state=SUCCESS; CloseRegionProcedure 06847a3269e13358b0a6ef24df043fda, server=6e6711bf2d40,36059,1733530964705 in 184 msec 2024-12-07T00:25:33,040 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=175, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=38b9299c00c909a09d0e514bfec0f342, UNASSIGN in 189 msec 2024-12-07T00:25:33,041 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=176, resume processing ppid=174 2024-12-07T00:25:33,041 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=176, ppid=174, state=SUCCESS; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=06847a3269e13358b0a6ef24df043fda, UNASSIGN in 190 msec 2024-12-07T00:25:33,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=174, resume processing ppid=173 2024-12-07T00:25:33,043 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=174, ppid=173, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 194 msec 2024-12-07T00:25:33,044 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531133044"}]},"ts":"1733531133044"} 2024-12-07T00:25:33,053 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-12-07T00:25:33,055 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testEmptyExportFileSystemState to state=DISABLED 2024-12-07T00:25:33,057 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=173, state=SUCCESS; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 216 msec 2024-12-07T00:25:33,145 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to 
see if procedure is done pid=173 2024-12-07T00:25:33,145 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 173 completed 2024-12-07T00:25:33,146 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,148 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=179, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,148 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,149 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=179, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,151 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,155 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,156 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,156 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:33,156 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:33,160 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-07T00:25:33,160 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testEmptyExportFileSystemState with data PBUF 2024-12-07T00:25:33,160 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-12-07T00:25:33,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:33,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:33,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:33,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:33,162 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data null 2024-12-07T00:25:33,162 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(135): Skipping permission cache refresh because writable data is empty 2024-12-07T00:25:33,163 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/recovered.edits] 2024-12-07T00:25:33,163 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T00:25:33,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:33,164 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with 
data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:33,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:33,165 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:33,166 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/recovered.edits] 2024-12-07T00:25:33,170 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/cf/82756c21d7d64ee1910987e2dc710ed8 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/cf/82756c21d7d64ee1910987e2dc710ed8 2024-12-07T00:25:33,171 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/cf/9812eff8ba1043018d80b1f389ae254b to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/cf/9812eff8ba1043018d80b1f389ae254b 2024-12-07T00:25:33,173 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda/recovered.edits/9.seqid 2024-12-07T00:25:33,174 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/06847a3269e13358b0a6ef24df043fda 2024-12-07T00:25:33,175 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342/recovered.edits/9.seqid 2024-12-07T00:25:33,176 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testEmptyExportFileSystemState/38b9299c00c909a09d0e514bfec0f342 2024-12-07T00:25:33,176 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-12-07T00:25:33,179 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=179, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,181 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testEmptyExportFileSystemState from hbase:meta 2024-12-07T00:25:33,188 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-12-07T00:25:33,189 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=179, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,189 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-12-07T00:25:33,189 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531133189"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:33,189 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531133189"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:33,193 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:25:33,193 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => 38b9299c00c909a09d0e514bfec0f342, NAME => 'testtb-testEmptyExportFileSystemState,,1733531121734.38b9299c00c909a09d0e514bfec0f342.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 06847a3269e13358b0a6ef24df043fda, NAME => 'testtb-testEmptyExportFileSystemState,1,1733531121734.06847a3269e13358b0a6ef24df043fda.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:25:33,193 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
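The disable/delete sequence recorded above, and the snapshot cleanup that follows below, mirror the teardown a client performs through the HBase Admin API. A minimal sketch under the assumption of a locally reachable cluster, with the table and snapshot names taken from this log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropExportedTable {
    public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
            if (admin.isTableEnabled(table)) {
                admin.disableTable(table);   // runs DisableTableProcedure on the master
            }
            admin.deleteTable(table);        // DeleteTableProcedure: archive regions, clean hbase:meta
            // Remove the snapshots created for the export test.
            admin.deleteSnapshot("emptySnaptb0-testEmptyExportFileSystemState");
            admin.deleteSnapshot("snaptb0-testEmptyExportFileSystemState");
        }
    }
}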
2024-12-07T00:25:33,193 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531133193"}]},"ts":"9223372036854775807"} 2024-12-07T00:25:33,197 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-12-07T00:25:33,200 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=179, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-12-07T00:25:33,201 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=179, state=SUCCESS; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 54 msec 2024-12-07T00:25:33,264 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=179 2024-12-07T00:25:33,265 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState, procId: 179 completed 2024-12-07T00:25:33,272 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" 2024-12-07T00:25:33,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:33,275 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" 2024-12-07T00:25:33,277 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-12-07T00:25:33,305 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testEmptyExportFileSystemState Thread=804 (was 792) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (41150622) connection to localhost/127.0.0.1:41549 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: 
hconnection-0x26cd44eb-shared-pool-43 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5669 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-42 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:52252 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:58448 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-41 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:52906 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (41150622) connection to localhost/127.0.0.1:36891 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (41150622) connection to localhost/127.0.0.1:45029 from appattempt_1733530972728_0007_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-40 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_849816958_1 at /127.0.0.1:39310 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36891 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 3542) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=802 (was 781) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=608 (was 673), ProcessCount=17 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=3040 (was 3742) 2024-12-07T00:25:33,305 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=804 is superior to 500 2024-12-07T00:25:33,330 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=804, OpenFileDescriptor=802, MaxFileDescriptor=1048576, SystemLoadAverage=608, ProcessCount=17, AvailableMemoryMB=3037 2024-12-07T00:25:33,330 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=804 is superior to 500 2024-12-07T00:25:33,333 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:25:33,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportWithChecksum 2024-12-07T00:25:33,335 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:25:33,335 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:33,335 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 180 2024-12-07T00:25:33,336 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if 
procedure is done pid=180 2024-12-07T00:25:33,336 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:25:33,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742237_1413 (size=404) 2024-12-07T00:25:33,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742237_1413 (size=404) 2024-12-07T00:25:33,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742237_1413 (size=404) 2024-12-07T00:25:33,350 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => dad33e11c0c4b25636f80c47867e4fd5, NAME => 'testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:33,351 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 58d6740d4bd1a583fa779ac09c4b1424, NAME => 'testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:33,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742238_1414 (size=65) 2024-12-07T00:25:33,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742238_1414 (size=65) 2024-12-07T00:25:33,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742238_1414 (size=65) 2024-12-07T00:25:33,366 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:33,366 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1681): Closing dad33e11c0c4b25636f80c47867e4fd5, disabling 
compactions & flushes 2024-12-07T00:25:33,366 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:25:33,366 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:25:33,366 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. after waiting 0 ms 2024-12-07T00:25:33,366 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:25:33,366 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:25:33,366 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1635): Region close journal for dad33e11c0c4b25636f80c47867e4fd5: 2024-12-07T00:25:33,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742239_1415 (size=65) 2024-12-07T00:25:33,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742239_1415 (size=65) 2024-12-07T00:25:33,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742239_1415 (size=65) 2024-12-07T00:25:33,438 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-07T00:25:33,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-07T00:25:33,780 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:33,780 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1681): Closing 58d6740d4bd1a583fa779ac09c4b1424, disabling compactions & flushes 2024-12-07T00:25:33,780 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:25:33,780 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:25:33,780 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 
after waiting 0 ms 2024-12-07T00:25:33,780 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:25:33,780 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:25:33,780 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1635): Region close journal for 58d6740d4bd1a583fa779ac09c4b1424: 2024-12-07T00:25:33,781 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:25:33,781 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733531133781"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531133781"}]},"ts":"1733531133781"} 2024-12-07T00:25:33,782 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1733531133781"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531133781"}]},"ts":"1733531133781"} 2024-12-07T00:25:33,784 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:25:33,785 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:25:33,785 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531133785"}]},"ts":"1733531133785"} 2024-12-07T00:25:33,786 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-12-07T00:25:33,792 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:25:33,793 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:25:33,793 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:25:33,793 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:25:33,793 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:25:33,793 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:25:33,793 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:25:33,794 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:25:33,794 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dad33e11c0c4b25636f80c47867e4fd5, 
ASSIGN}, {pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=58d6740d4bd1a583fa779ac09c4b1424, ASSIGN}] 2024-12-07T00:25:33,796 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=58d6740d4bd1a583fa779ac09c4b1424, ASSIGN 2024-12-07T00:25:33,796 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dad33e11c0c4b25636f80c47867e4fd5, ASSIGN 2024-12-07T00:25:33,797 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=182, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=58d6740d4bd1a583fa779ac09c4b1424, ASSIGN; state=OFFLINE, location=6e6711bf2d40,43281,1733530964846; forceNewPlan=false, retain=false 2024-12-07T00:25:33,797 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(264): Starting pid=181, ppid=180, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dad33e11c0c4b25636f80c47867e4fd5, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:25:33,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-07T00:25:33,948 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:25:33,948 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=58d6740d4bd1a583fa779ac09c4b1424, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:33,948 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=dad33e11c0c4b25636f80c47867e4fd5, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:33,950 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=183, ppid=182, state=RUNNABLE; OpenRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:25:33,951 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=184, ppid=181, state=RUNNABLE; OpenRegionProcedure dad33e11c0c4b25636f80c47867e4fd5, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:25:34,102 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:34,102 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:34,105 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 
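The CreateTableProcedure and HRegion(7106) entries above record the two regions of testtb-testExportWithChecksum being laid out with a single 'cf' family (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => '65536', no compression) and a split point at row key '1'. As a hedged illustration only — this is not the test's actual source, and the class name below is invented — the equivalent client-side table creation in Java would look roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptorBuilder table =
          TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-testExportWithChecksum"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)                 // VERSIONS => '1' in the logged descriptor
                  .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
                  .setBlocksize(65536)               // BLOCKSIZE => '65536'
                  .build());
      // Pre-splitting at '1' yields the two regions seen above:
      // STARTKEY '' .. '1' and STARTKEY '1' .. ''.
      admin.createTable(table.build(), new byte[][] { Bytes.toBytes("1") });
    }
  }
}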
2024-12-07T00:25:34,106 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7285): Opening region: {ENCODED => dad33e11c0c4b25636f80c47867e4fd5, NAME => 'testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:25:34,106 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(135): Open testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:25:34,106 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7285): Opening region: {ENCODED => 58d6740d4bd1a583fa779ac09c4b1424, NAME => 'testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:25:34,106 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. service=AccessControlService 2024-12-07T00:25:34,106 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. service=AccessControlService 2024-12-07T00:25:34,106 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:25:34,106 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:25:34,106 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,106 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,106 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:34,106 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(894): Instantiated testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:25:34,107 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7327): checking encryption for 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,107 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7327): checking encryption for dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,107 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(7330): checking classloading for 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,107 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(7330): checking classloading for dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,108 INFO [StoreOpener-58d6740d4bd1a583fa779ac09c4b1424-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,108 INFO [StoreOpener-dad33e11c0c4b25636f80c47867e4fd5-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,109 INFO [StoreOpener-dad33e11c0c4b25636f80c47867e4fd5-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
dad33e11c0c4b25636f80c47867e4fd5 columnFamilyName cf 2024-12-07T00:25:34,109 INFO [StoreOpener-58d6740d4bd1a583fa779ac09c4b1424-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 58d6740d4bd1a583fa779ac09c4b1424 columnFamilyName cf 2024-12-07T00:25:34,110 DEBUG [StoreOpener-58d6740d4bd1a583fa779ac09c4b1424-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:34,110 DEBUG [StoreOpener-dad33e11c0c4b25636f80c47867e4fd5-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:25:34,110 INFO [StoreOpener-dad33e11c0c4b25636f80c47867e4fd5-1 {}] regionserver.HStore(327): Store=dad33e11c0c4b25636f80c47867e4fd5/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:25:34,110 INFO [StoreOpener-58d6740d4bd1a583fa779ac09c4b1424-1 {}] regionserver.HStore(327): Store=58d6740d4bd1a583fa779ac09c4b1424/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:25:34,111 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,111 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,111 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,111 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,113 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1085): writing seq id for dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,113 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 
{event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1085): writing seq id for 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,115 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:25:34,115 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1102): Opened 58d6740d4bd1a583fa779ac09c4b1424; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72909344, jitterRate=0.08643388748168945}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:25:34,116 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegion(1001): Region open journal for 58d6740d4bd1a583fa779ac09c4b1424: 2024-12-07T00:25:34,117 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424., pid=183, masterSystemTime=1733531134102 2024-12-07T00:25:34,118 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:25:34,118 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=183}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 
2024-12-07T00:25:34,119 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=182 updating hbase:meta row=58d6740d4bd1a583fa779ac09c4b1424, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:34,120 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:25:34,121 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1102): Opened dad33e11c0c4b25636f80c47867e4fd5; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71279316, jitterRate=0.062144577503204346}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:25:34,121 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegion(1001): Region open journal for dad33e11c0c4b25636f80c47867e4fd5: 2024-12-07T00:25:34,122 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5., pid=184, masterSystemTime=1733531134102 2024-12-07T00:25:34,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=183, resume processing ppid=182 2024-12-07T00:25:34,122 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=183, ppid=182, state=SUCCESS; OpenRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424, server=6e6711bf2d40,43281,1733530964846 in 170 msec 2024-12-07T00:25:34,123 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:25:34,123 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=184}] handler.AssignRegionHandler(164): Opened testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 
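At this point the TransitRegionStateProcedures have moved both regions to OPEN and hbase:meta has been updated; further down (00:25:34,441) the test waits until all regions of the table are assigned via HBaseTestingUtility. A simplified stand-in for that wait using only the public Admin API — an assumption for illustration, not the utility the test actually uses — could look like this:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;

public final class WaitForTable {
  /** Poll until the table reports available or the timeout elapses. */
  public static void waitUntilAvailable(Admin admin, TableName table, long timeoutMs)
      throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (!admin.isTableAvailable(table)) {
      if (System.currentTimeMillis() > deadline) {
        throw new IOException("Table " + table + " not available within " + timeoutMs + " ms");
      }
      Thread.sleep(100);
    }
  }
}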
2024-12-07T00:25:34,124 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=182, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=58d6740d4bd1a583fa779ac09c4b1424, ASSIGN in 328 msec 2024-12-07T00:25:34,124 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=181 updating hbase:meta row=dad33e11c0c4b25636f80c47867e4fd5, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:34,126 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=184, resume processing ppid=181 2024-12-07T00:25:34,126 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=184, ppid=181, state=SUCCESS; OpenRegionProcedure dad33e11c0c4b25636f80c47867e4fd5, server=6e6711bf2d40,33095,1733530964767 in 175 msec 2024-12-07T00:25:34,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=181, resume processing ppid=180 2024-12-07T00:25:34,128 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=181, ppid=180, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dad33e11c0c4b25636f80c47867e4fd5, ASSIGN in 332 msec 2024-12-07T00:25:34,129 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:25:34,129 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531134129"}]},"ts":"1733531134129"} 2024-12-07T00:25:34,130 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-12-07T00:25:34,132 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=180, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:25:34,133 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-12-07T00:25:34,134 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-07T00:25:34,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:34,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:34,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:34,137 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:25:34,139 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:34,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:34,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:34,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:34,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:34,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:34,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:34,139 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-12-07T00:25:34,140 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=180, state=SUCCESS; CreateTableProcedure table=testtb-testExportWithChecksum in 805 msec 2024-12-07T00:25:34,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-07T00:25:34,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-12-07T00:25:34,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-12-07T00:25:34,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=180 2024-12-07T00:25:34,441 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum, procId: 180 completed 2024-12-07T00:25:34,441 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtility(3531): Waiting until all regions of table testtb-testExportWithChecksum get assigned. Timeout = 60000ms 2024-12-07T00:25:34,442 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:34,446 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportWithChecksum assigned to meta. Checking AM states. 2024-12-07T00:25:34,446 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:34,447 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportWithChecksum assigned. 2024-12-07T00:25:34,450 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-07T00:25:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531134450 (current time:1733531134450). 2024-12-07T00:25:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:25:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-07T00:25:34,450 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:25:34,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x74a5ce57 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@47f421e2 2024-12-07T00:25:34,456 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@691c5101, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:34,457 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:34,459 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37046, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:34,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x74a5ce57 to 127.0.0.1:60354 2024-12-07T00:25:34,460 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:34,461 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x0a202a9b to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@6c3df10c 2024-12-07T00:25:34,477 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@672231d3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:34,496 DEBUG [hconnection-0x5bbee471-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:34,498 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37048, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:34,500 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:34,501 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40320, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:34,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x0a202a9b to 127.0.0.1:60354 2024-12-07T00:25:34,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:34,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-07T00:25:34,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 2024-12-07T00:25:34,505 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=185, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-07T00:25:34,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-07T00:25:34,508 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:25:34,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T00:25:34,509 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:25:34,511 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 
2024-12-07T00:25:34,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742240_1416 (size=161) 2024-12-07T00:25:34,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742240_1416 (size=161) 2024-12-07T00:25:34,518 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742240_1416 (size=161) 2024-12-07T00:25:34,520 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:25:34,520 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure dad33e11c0c4b25636f80c47867e4fd5}, {pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424}] 2024-12-07T00:25:34,521 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,521 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=185 2024-12-07T00:25:34,672 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:34,672 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:34,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=186 2024-12-07T00:25:34,673 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=187 2024-12-07T00:25:34,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:25:34,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 
2024-12-07T00:25:34,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.HRegion(2538): Flush status journal for 58d6740d4bd1a583fa779ac09c4b1424: 2024-12-07T00:25:34,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.HRegion(2538): Flush status journal for dad33e11c0c4b25636f80c47867e4fd5: 2024-12-07T00:25:34,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. for emptySnaptb0-testExportWithChecksum completed. 2024-12-07T00:25:34,673 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. for emptySnaptb0-testExportWithChecksum completed. 2024-12-07T00:25:34,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-07T00:25:34,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-12-07T00:25:34,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:34,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:34,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:25:34,674 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:25:34,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742241_1417 (size=68) 2024-12-07T00:25:34,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742241_1417 (size=68) 2024-12-07T00:25:34,698 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742241_1417 (size=68) 2024-12-07T00:25:34,698 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 
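The SnapshotRegionProcedure entries here belong to the emptySnaptb0-testExportWithChecksum snapshot requested at 00:25:34,450 with type=FLUSH. On the client side such a request is typically a single Admin call; the following Java sketch is an assumption for illustration (the helper class is invented), not code taken from the test:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class FlushSnapshotExample {
  /** Request a FLUSH-type snapshot, which flushes memstores before referencing HFiles. */
  public static void takeFlushSnapshot(Admin admin, String snapshotName, TableName table)
      throws IOException {
    admin.snapshot(snapshotName, table, SnapshotType.FLUSH);
    // e.g. takeFlushSnapshot(admin, "emptySnaptb0-testExportWithChecksum",
    //                        TableName.valueOf("testtb-testExportWithChecksum"));
  }
}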
2024-12-07T00:25:34,699 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=186}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=186 2024-12-07T00:25:34,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=186 2024-12-07T00:25:34,699 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,699 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=186, ppid=185, state=RUNNABLE; SnapshotRegionProcedure dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,702 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=186, ppid=185, state=SUCCESS; SnapshotRegionProcedure dad33e11c0c4b25636f80c47867e4fd5 in 180 msec 2024-12-07T00:25:34,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742242_1418 (size=68) 2024-12-07T00:25:34,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742242_1418 (size=68) 2024-12-07T00:25:34,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742242_1418 (size=68) 2024-12-07T00:25:34,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:25:34,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=187}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=187 2024-12-07T00:25:34,723 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=187 2024-12-07T00:25:34,723 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,724 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=187, ppid=185, state=RUNNABLE; SnapshotRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=187, resume processing ppid=185 2024-12-07T00:25:34,731 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:25:34,731 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=187, ppid=185, state=SUCCESS; SnapshotRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424 in 208 msec 2024-12-07T00:25:34,732 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum 
table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:25:34,733 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:25:34,733 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-12-07T00:25:34,734 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-12-07T00:25:34,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742243_1419 (size=543) 2024-12-07T00:25:34,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742243_1419 (size=543) 2024-12-07T00:25:34,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742243_1419 (size=543) 2024-12-07T00:25:34,761 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:25:34,766 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:25:34,766 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-12-07T00:25:34,768 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=185, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:25:34,768 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 185 2024-12-07T00:25:34,769 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=185, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=185, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 264 msec 2024-12-07T00:25:34,810 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done 
pid=185 2024-12-07T00:25:34,810 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 185 completed 2024-12-07T00:25:34,817 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33095 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:25:34,820 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:25:34,823 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportWithChecksum 2024-12-07T00:25:34,823 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:25:34,824 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:25:34,838 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-07T00:25:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531134839 (current time:1733531134839). 2024-12-07T00:25:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:25:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-12-07T00:25:34,839 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:25:34,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x252d58ee to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@91cbd56 2024-12-07T00:25:34,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@52cda70b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:34,846 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:34,847 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37054, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:34,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x252d58ee to 127.0.0.1:60354 2024-12-07T00:25:34,849 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:34,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x74c93794 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@18d3d256 2024-12-07T00:25:34,856 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2799dbb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:25:34,858 DEBUG [hconnection-0x7bb838ee-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:34,859 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37064, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:34,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:25:34,863 INFO [RS-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40336, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:25:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x74c93794 to 127.0.0.1:60354 2024-12-07T00:25:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:25:34,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-12-07T00:25:34,866 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
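The HRegion(8254) warnings at 00:25:34,817 and 00:25:34,820 ("writing data to region ... with WAL disabled. Data may be lost in the event of a crash.") are what a region server emits when a client issues puts with durability SKIP_WAL, which here happens just before snaptb0-testExportWithChecksum is requested. A minimal hedged sketch of such a load follows; the helper class, row, and value bytes are placeholders, and only the table name, family 'cf', and qualifier 'q' come from the log:

import java.io.IOException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class SkipWalLoad {
  public static void loadRow(Connection conn, byte[] row, byte[] value) throws IOException {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithChecksum"))) {
      Put put = new Put(row)
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), value);
      // SKIP_WAL is what triggers the "WAL disabled" warning seen in the region server log.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}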
2024-12-07T00:25:34,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-12-07T00:25:34,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-07T00:25:34,868 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-07T00:25:34,868 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:25:34,869 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:25:34,872 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:25:34,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742244_1420 (size=156) 2024-12-07T00:25:34,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742244_1420 (size=156) 2024-12-07T00:25:34,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742244_1420 (size=156) 2024-12-07T00:25:34,892 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:25:34,892 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure dad33e11c0c4b25636f80c47867e4fd5}, {pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424}] 2024-12-07T00:25:34,893 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:34,894 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:34,897 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 
'testtb-testExportWithChecksum' 2024-12-07T00:25:34,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-07T00:25:35,044 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:25:35,045 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:25:35,045 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-12-07T00:25:35,045 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:25:35,046 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-12-07T00:25:35,046 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:25:35,046 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2837): Flushing 58d6740d4bd1a583fa779ac09c4b1424 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-12-07T00:25:35,046 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2837): Flushing dad33e11c0c4b25636f80c47867e4fd5 1/1 column families, dataSize=266 B heapSize=832 B 2024-12-07T00:25:35,069 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/.tmp/cf/b309ca5796414a27a69985ffae0926e4 is 71, key is 072d7888888f6f888ead67ae154e9551/cf:q/1733531134817/Put/seqid=0 2024-12-07T00:25:35,088 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/.tmp/cf/4ee9c027930748ceb00d3cb4fc4d00ef is 71, key is 1b65da411ddb82d94bf79f77a58d0c18/cf:q/1733531134820/Put/seqid=0 2024-12-07T00:25:35,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742245_1421 (size=5356) 2024-12-07T00:25:35,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742245_1421 (size=5356) 2024-12-07T00:25:35,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742245_1421 (size=5356) 2024-12-07T00:25:35,108 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=266 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/.tmp/cf/b309ca5796414a27a69985ffae0926e4 2024-12-07T00:25:35,115 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/.tmp/cf/b309ca5796414a27a69985ffae0926e4 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/cf/b309ca5796414a27a69985ffae0926e4 2024-12-07T00:25:35,121 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/cf/b309ca5796414a27a69985ffae0926e4, entries=4, sequenceid=6, filesize=5.2 K 2024-12-07T00:25:35,122 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3040): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for dad33e11c0c4b25636f80c47867e4fd5 in 76ms, sequenceid=6, compaction requested=false 2024-12-07T00:25:35,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2538): Flush status journal for dad33e11c0c4b25636f80c47867e4fd5: 2024-12-07T00:25:35,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. for snaptb0-testExportWithChecksum completed. 2024-12-07T00:25:35,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-07T00:25:35,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:35,122 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/cf/b309ca5796414a27a69985ffae0926e4] hfiles 2024-12-07T00:25:35,123 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/cf/b309ca5796414a27a69985ffae0926e4 for snapshot=snaptb0-testExportWithChecksum 2024-12-07T00:25:35,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742246_1422 (size=8258) 2024-12-07T00:25:35,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742246_1422 (size=8258) 2024-12-07T00:25:35,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742246_1422 (size=8258) 2024-12-07T00:25:35,135 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.00 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/.tmp/cf/4ee9c027930748ceb00d3cb4fc4d00ef 2024-12-07T00:25:35,140 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/.tmp/cf/4ee9c027930748ceb00d3cb4fc4d00ef as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef 2024-12-07T00:25:35,146 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef, entries=46, sequenceid=6, filesize=8.1 K 2024-12-07T00:25:35,147 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3040): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 58d6740d4bd1a583fa779ac09c4b1424 in 101ms, sequenceid=6, compaction requested=false 2024-12-07T00:25:35,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2538): Flush status journal for 
58d6740d4bd1a583fa779ac09c4b1424: 2024-12-07T00:25:35,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. for snaptb0-testExportWithChecksum completed. 2024-12-07T00:25:35,147 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.' region-info for snapshot=snaptb0-testExportWithChecksum 2024-12-07T00:25:35,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:25:35,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef] hfiles 2024-12-07T00:25:35,148 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef for snapshot=snaptb0-testExportWithChecksum 2024-12-07T00:25:35,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742247_1423 (size=107) 2024-12-07T00:25:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742247_1423 (size=107) 2024-12-07T00:25:35,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742247_1423 (size=107) 2024-12-07T00:25:35,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 
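[Editor's note] The two SnapshotRegionCallable runs above flush each region's memstore to a new hfile under .tmp, commit it into the cf directory, and then record region-info and hfile references in the snapshot manifest. The same memstore-to-hfile flush can also be requested explicitly; a hedged sketch, reusing the client imports from the earlier sketch (only the table name is taken from this log):

    // Hedged sketch: an explicit flush performs the same memstore-to-hfile write that the
    // FLUSH-type snapshot triggers per region above. Connection setup is assumed.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.flush(TableName.valueOf("testtb-testExportWithChecksum"));
    }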
2024-12-07T00:25:35,165 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-12-07T00:25:35,165 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=189 2024-12-07T00:25:35,165 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:35,166 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=189, ppid=188, state=RUNNABLE; SnapshotRegionProcedure dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:25:35,169 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=189, ppid=188, state=SUCCESS; SnapshotRegionProcedure dad33e11c0c4b25636f80c47867e4fd5 in 274 msec 2024-12-07T00:25:35,170 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-07T00:25:35,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742248_1424 (size=107) 2024-12-07T00:25:35,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742248_1424 (size=107) 2024-12-07T00:25:35,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742248_1424 (size=107) 2024-12-07T00:25:35,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 
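[Editor's note] With the region snapshot work done on both region servers, the subprocedures report back to the master (pid=189 above, pid=190 just below) and the parent procedure moves on to consolidate and verify the snapshot. A hedged sketch of confirming the finished snapshot from a client afterwards, again with connection details assumed and the same imports as above:

    // Hedged sketch: confirm the snapshot is registered with the master once pid=188 completes.
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      boolean present = admin.listSnapshots().stream()
          .anyMatch(d -> d.getName().equals("snaptb0-testExportWithChecksum"));
      System.out.println("snaptb0-testExportWithChecksum registered: " + present);
    }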
2024-12-07T00:25:35,175 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-12-07T00:25:35,175 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=190 2024-12-07T00:25:35,175 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:35,175 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=190, ppid=188, state=RUNNABLE; SnapshotRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:25:35,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=190, resume processing ppid=188 2024-12-07T00:25:35,180 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:25:35,180 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=190, ppid=188, state=SUCCESS; SnapshotRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424 in 286 msec 2024-12-07T00:25:35,181 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:25:35,182 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:25:35,182 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-12-07T00:25:35,183 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T00:25:35,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742249_1425 (size=621) 2024-12-07T00:25:35,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742249_1425 (size=621) 2024-12-07T00:25:35,198 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742249_1425 (size=621) 2024-12-07T00:25:35,206 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:25:35,228 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, 
state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:25:35,228 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-07T00:25:35,230 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:25:35,230 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 188 2024-12-07T00:25:35,231 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=188, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 364 msec 2024-12-07T00:25:35,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=188 2024-12-07T00:25:35,472 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum, procId: 188 completed 2024-12-07T00:25:35,472 INFO [Time-limited test {}] snapshot.TestExportSnapshot(476): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472 2024-12-07T00:25:35,472 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:35,512 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:25:35,512 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=org.apache.hadoop.fs.LocalFileSystem@3c2334f, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T00:25:35,514 INFO 
[Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:25:35,517 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T00:25:35,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:35,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:35,555 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:35,556 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,761 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-2470350596247387417.jar 2024-12-07T00:25:36,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,762 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-15869596861651495918.jar 2024-12-07T00:25:36,832 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T00:25:36,833 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:25:36,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:25:36,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:25:36,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:25:36,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:25:36,834 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:25:36,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:25:36,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:25:36,835 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:25:36,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:25:36,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:25:36,835 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:25:36,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:25:36,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:25:36,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:25:36,836 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:25:36,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:25:36,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:25:36,837 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:25:36,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742250_1426 (size=127628) 2024-12-07T00:25:36,889 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742250_1426 (size=127628) 2024-12-07T00:25:36,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742250_1426 (size=127628) 2024-12-07T00:25:36,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742251_1427 (size=2172101) 2024-12-07T00:25:36,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742251_1427 (size=2172101) 2024-12-07T00:25:36,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742251_1427 (size=2172101) 2024-12-07T00:25:36,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742252_1428 (size=213228) 2024-12-07T00:25:36,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742252_1428 (size=213228) 2024-12-07T00:25:36,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742252_1428 (size=213228) 2024-12-07T00:25:36,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742253_1429 (size=1877034) 2024-12-07T00:25:36,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742253_1429 (size=1877034) 2024-12-07T00:25:36,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742253_1429 (size=1877034) 2024-12-07T00:25:36,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742254_1430 (size=533455) 2024-12-07T00:25:36,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742254_1430 (size=533455) 2024-12-07T00:25:36,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742254_1430 (size=533455) 2024-12-07T00:25:36,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742255_1431 (size=7280644) 2024-12-07T00:25:36,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742255_1431 (size=7280644) 2024-12-07T00:25:36,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742255_1431 (size=7280644) 2024-12-07T00:25:36,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742256_1432 (size=4188619) 2024-12-07T00:25:36,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742256_1432 (size=4188619) 2024-12-07T00:25:36,993 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742256_1432 (size=4188619) 2024-12-07T00:25:37,006 
INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742257_1433 (size=20406) 2024-12-07T00:25:37,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742257_1433 (size=20406) 2024-12-07T00:25:37,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742257_1433 (size=20406) 2024-12-07T00:25:37,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742258_1434 (size=75495) 2024-12-07T00:25:37,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742258_1434 (size=75495) 2024-12-07T00:25:37,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742258_1434 (size=75495) 2024-12-07T00:25:37,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742259_1435 (size=451756) 2024-12-07T00:25:37,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742259_1435 (size=451756) 2024-12-07T00:25:37,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742259_1435 (size=451756) 2024-12-07T00:25:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742260_1436 (size=45609) 2024-12-07T00:25:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742260_1436 (size=45609) 2024-12-07T00:25:37,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742260_1436 (size=45609) 2024-12-07T00:25:37,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742261_1437 (size=110084) 2024-12-07T00:25:37,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742261_1437 (size=110084) 2024-12-07T00:25:37,042 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742261_1437 (size=110084) 2024-12-07T00:25:37,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742262_1438 (size=6350154) 2024-12-07T00:25:37,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742262_1438 (size=6350154) 2024-12-07T00:25:37,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742262_1438 (size=6350154) 2024-12-07T00:25:37,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742263_1439 (size=1323991) 2024-12-07T00:25:37,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742263_1439 (size=1323991) 
2024-12-07T00:25:37,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742263_1439 (size=1323991) 2024-12-07T00:25:37,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742264_1440 (size=23076) 2024-12-07T00:25:37,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742264_1440 (size=23076) 2024-12-07T00:25:37,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742264_1440 (size=23076) 2024-12-07T00:25:37,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742265_1441 (size=126803) 2024-12-07T00:25:37,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742265_1441 (size=126803) 2024-12-07T00:25:37,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742265_1441 (size=126803) 2024-12-07T00:25:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742266_1442 (size=322274) 2024-12-07T00:25:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742266_1442 (size=322274) 2024-12-07T00:25:37,097 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742266_1442 (size=322274) 2024-12-07T00:25:37,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742267_1443 (size=1832290) 2024-12-07T00:25:37,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742267_1443 (size=1832290) 2024-12-07T00:25:37,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742267_1443 (size=1832290) 2024-12-07T00:25:37,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742268_1444 (size=30081) 2024-12-07T00:25:37,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742268_1444 (size=30081) 2024-12-07T00:25:37,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742268_1444 (size=30081) 2024-12-07T00:25:37,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742269_1445 (size=53616) 2024-12-07T00:25:37,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742269_1445 (size=53616) 2024-12-07T00:25:37,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742269_1445 (size=53616) 2024-12-07T00:25:37,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742270_1446 (size=29229) 
2024-12-07T00:25:37,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742270_1446 (size=29229) 2024-12-07T00:25:37,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742270_1446 (size=29229) 2024-12-07T00:25:37,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742271_1447 (size=169089) 2024-12-07T00:25:37,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742271_1447 (size=169089) 2024-12-07T00:25:37,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742271_1447 (size=169089) 2024-12-07T00:25:37,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742272_1448 (size=5175431) 2024-12-07T00:25:37,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742272_1448 (size=5175431) 2024-12-07T00:25:37,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742272_1448 (size=5175431) 2024-12-07T00:25:37,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742273_1449 (size=136454) 2024-12-07T00:25:37,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742273_1449 (size=136454) 2024-12-07T00:25:37,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742273_1449 (size=136454) 2024-12-07T00:25:37,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742274_1450 (size=907851) 2024-12-07T00:25:37,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742274_1450 (size=907851) 2024-12-07T00:25:37,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742274_1450 (size=907851) 2024-12-07T00:25:37,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742275_1451 (size=3317408) 2024-12-07T00:25:37,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742275_1451 (size=3317408) 2024-12-07T00:25:37,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742275_1451 (size=3317408) 2024-12-07T00:25:37,327 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0007_000001 (auth:SIMPLE) from 127.0.0.1:43606 2024-12-07T00:25:37,339 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0007/container_1733530972728_0007_01_000001/launch_container.sh] 2024-12-07T00:25:37,339 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0007/container_1733530972728_0007_01_000001/container_tokens] 2024-12-07T00:25:37,339 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0007/container_1733530972728_0007_01_000001/sysfs] 2024-12-07T00:25:37,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742276_1452 (size=503880) 2024-12-07T00:25:37,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742276_1452 (size=503880) 2024-12-07T00:25:37,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742276_1452 (size=503880) 2024-12-07T00:25:37,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742277_1453 (size=4695811) 2024-12-07T00:25:37,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742277_1453 (size=4695811) 2024-12-07T00:25:37,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742277_1453 (size=4695811) 2024-12-07T00:25:37,624 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
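[Editor's note] The long run of TableMapReduceUtil(923) entries above shows the export job resolving, for each class it needs, the jar that will be shipped with the MapReduce job, and JobResourceUploader then warns that no job jar was set. As a hedged sketch — class and job names here are placeholders, not from the test — this is how that wiring is typically done in one's own driver:

    // Hedged sketch of the dependency-jar wiring the TableMapReduceUtil(923) lines record.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class ExportJobWiringSketch {
      public static Job newJob() throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "snapshot-export-sketch");     // job name is a placeholder
        job.setJarByClass(ExportJobWiringSketch.class);                // avoids the "No job jar file set" warning
        TableMapReduceUtil.addDependencyJars(job);                     // ships the HBase/ZooKeeper/protobuf jars logged above
        return job;
      }
    }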
2024-12-07T00:25:37,626 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-07T00:25:37,628 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T00:25:37,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742278_1454 (size=338) 2024-12-07T00:25:37,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742278_1454 (size=338) 2024-12-07T00:25:37,634 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742278_1454 (size=338) 2024-12-07T00:25:37,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742279_1455 (size=15) 2024-12-07T00:25:37,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742279_1455 (size=15) 2024-12-07T00:25:37,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742279_1455 (size=15) 2024-12-07T00:25:37,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742280_1456 (size=304931) 2024-12-07T00:25:37,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742280_1456 (size=304931) 2024-12-07T00:25:37,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742280_1456 (size=304931) 2024-12-07T00:25:37,670 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:25:37,670 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:25:38,322 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0008_000001 (auth:SIMPLE) from 127.0.0.1:39496 2024-12-07T00:25:38,551 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:25:42,674 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
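[Editor's note] At this point ExportSnapshot has loaded the snapshot's hfile list (one split, about 13.3 K) and submits the copy job. The mapper attempts recorded below fail with a checksum mismatch because the source (hdfs://) and target (file:/) filesystems compute different checksums, and the error text itself offers two ways out: file-level COMPOSITE_CRC checksums, or skipping verification. A hedged sketch of driving the exporter with those options — this is not the test's own driver, and the destination path is a placeholder:

    // Hedged sketch: re-running the export with the checksum options named in the failure below.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithCompositeCrcSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // File-level CRCs stay comparable between HDFS and the local filesystem.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export",      // placeholder target directory
            // "-no-checksum-verify",                    // alternative: skip checksum verification entirely
        });
        System.exit(rc);
      }
    }

The failure text below also gives the equivalent command-line switch, -Ddfs.checksum.combine.mode=COMPOSITE_CRC, which sets the same property when the tool is invoked from the shell.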
2024-12-07T00:25:43,600 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0008_000001 (auth:SIMPLE) from 127.0.0.1:44242 2024-12-07T00:25:43,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742281_1457 (size=350605) 2024-12-07T00:25:43,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742281_1457 (size=350605) 2024-12-07T00:25:43,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742281_1457 (size=350605) 2024-12-07T00:25:45,935 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0008_000001 (auth:SIMPLE) from 127.0.0.1:40996 2024-12-07T00:25:50,110 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000002/launch_container.sh] 2024-12-07T00:25:50,110 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000002/container_tokens] 2024-12-07T00:25:50,111 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_3/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000002/sysfs] 2024-12-07T00:25:50,889 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region dad33e11c0c4b25636f80c47867e4fd5 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:25:50,889 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region aa44abbb59514f8b493e3d258a51d1fb changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:25:50,889 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region c613c7c0d4bf4a8124f2c33a8aabebf4 changed from -1.0 to 0.0, refreshing cache 2024-12-07T00:25:50,890 DEBUG [master/6e6711bf2d40:0.Chore.1 {}] balancer.RegionLocationFinder(172): Locality for region 58d6740d4bd1a583fa779ac09c4b1424 changed from -1.0 to 0.0, refreshing cache Error: java.io.IOException: Checksum mismatch between hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef and 
file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472/archive/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-07T00:25:51,721 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0008_000001 (auth:SIMPLE) from 127.0.0.1:41004 2024-12-07T00:25:55,170 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000003/launch_container.sh] 2024-12-07T00:25:55,170 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000003/container_tokens] 2024-12-07T00:25:55,170 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000003/sysfs] 2024-12-07T00:25:55,635 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 
c613c7c0d4bf4a8124f2c33a8aabebf4, had cached 0 bytes from a total of 8460 2024-12-07T00:25:55,639 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region aa44abbb59514f8b493e3d258a51d1fb, had cached 0 bytes from a total of 5149 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472/archive/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-07T00:25:56,734 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0008_000001 (auth:SIMPLE) from 127.0.0.1:44456 2024-12-07T00:26:00,296 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000004/launch_container.sh] 2024-12-07T00:26:00,296 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000004/container_tokens] 2024-12-07T00:26:00,296 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete 
returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000004/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/local-export-1733531135472/archive/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:596) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:332) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:254) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:180) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-12-07T00:26:01,757 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0008_000001 (auth:SIMPLE) from 127.0.0.1:44464 2024-12-07T00:26:05,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742282_1458 (size=21340) 2024-12-07T00:26:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742282_1458 (size=21340) 2024-12-07T00:26:05,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742282_1458 (size=21340) 2024-12-07T00:26:05,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742283_1459 (size=460) 2024-12-07T00:26:05,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742283_1459 (size=460) 2024-12-07T00:26:05,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is 
added to blk_1073742283_1459 (size=460) 2024-12-07T00:26:05,282 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000005/launch_container.sh] 2024-12-07T00:26:05,282 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000005/container_tokens] 2024-12-07T00:26:05,283 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000005/sysfs] 2024-12-07T00:26:05,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742284_1460 (size=21340) 2024-12-07T00:26:05,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742284_1460 (size=21340) 2024-12-07T00:26:05,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742284_1460 (size=21340) 2024-12-07T00:26:05,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742285_1461 (size=350605) 2024-12-07T00:26:05,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742285_1461 (size=350605) 2024-12-07T00:26:05,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742285_1461 (size=350605) 2024-12-07T00:26:05,381 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0008_000001 (auth:SIMPLE) from 127.0.0.1:53792 2024-12-07T00:26:07,080 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1227): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1733530972728_0008_m_000000 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:935) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1204) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:151) ~[hbase-common-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:523) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:353) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:237) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
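The repeated "Checksum mismatch" failures above end with ExportSnapshot giving up after its map tasks could not reconcile HDFS and local-filesystem checksums. The error text itself names the two remedies: file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. Below is a minimal Java sketch, not taken from the test, of how those options could be passed to the same ExportSnapshot tool through ToolRunner (the stack traces above show ToolRunner driving AbstractHBaseTool.run); the destination URI is hypothetical.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithCompositeCrc {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // First remedy named in the error message: compare composite CRCs at file level,
        // which is meant to work when block sizes or filesystem types differ.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",
            "-copy-to", "file:///tmp/local-export"   // hypothetical destination path
            // Alternatively add "-no-checksum-verify" to skip verification entirely,
            // at the risk of masking corruption during the copy, as the log warns.
        });
        System.exit(rc);
      }
    }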
2024-12-07T00:26:07,081 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081 2024-12-07T00:26:07,081 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46157, tgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081, rawTgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:26:07,111 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:26:07,111 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T00:26:07,114 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:26:07,118 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-12-07T00:26:07,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742286_1462 (size=156) 2024-12-07T00:26:07,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742287_1463 (size=621) 2024-12-07T00:26:07,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742286_1462 (size=156) 2024-12-07T00:26:07,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742287_1463 (size=621) 2024-12-07T00:26:07,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742286_1462 (size=156) 2024-12-07T00:26:07,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742287_1463 (size=621) 2024-12-07T00:26:07,129 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:07,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:07,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:07,130 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:08,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-6855925554276745958.jar 2024-12-07T00:26:08,163 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:08,164 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:08,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-662501088735462348.jar 2024-12-07T00:26:08,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:08,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:08,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:08,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:08,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:08,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 
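The DEBUG lines of the form "For class X, using jar Y" are TableMapReduceUtil resolving, for each class the export job needs, the jar that will be shipped to the task containers. A short sketch of how a job typically triggers this resolution is below; it assumes a Job object has already been configured elsewhere, and is illustrative rather than the code path the test uses.

    import java.io.IOException;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public final class DependencyJarSetup {
      static void shipHBaseJars(Job job) throws IOException {
        // Locates (or builds) the jar containing each required HBase/Hadoop class --
        // the "For class X, using jar Y" lines above -- and adds them to the job's
        // distributed cache so map tasks can load those classes.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }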
2024-12-07T00:26:08,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:26:08,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:26:08,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:26:08,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:26:08,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:26:08,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:26:08,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:26:08,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:26:08,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:26:08,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:26:08,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:26:08,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:26:08,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:26:08,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:26:08,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:26:08,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:26:08,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:26:08,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:26:08,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:26:08,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742288_1464 (size=127628) 2024-12-07T00:26:08,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742288_1464 (size=127628) 2024-12-07T00:26:08,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742288_1464 (size=127628) 2024-12-07T00:26:08,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742289_1465 (size=2172101) 2024-12-07T00:26:08,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742289_1465 (size=2172101) 2024-12-07T00:26:08,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742289_1465 (size=2172101) 2024-12-07T00:26:08,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is 
added to blk_1073742290_1466 (size=213228) 2024-12-07T00:26:08,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742290_1466 (size=213228) 2024-12-07T00:26:08,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742290_1466 (size=213228) 2024-12-07T00:26:08,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742291_1467 (size=1877034) 2024-12-07T00:26:08,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742291_1467 (size=1877034) 2024-12-07T00:26:08,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742291_1467 (size=1877034) 2024-12-07T00:26:08,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742292_1468 (size=533455) 2024-12-07T00:26:08,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742292_1468 (size=533455) 2024-12-07T00:26:08,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742292_1468 (size=533455) 2024-12-07T00:26:08,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742293_1469 (size=7280644) 2024-12-07T00:26:08,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742293_1469 (size=7280644) 2024-12-07T00:26:08,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742293_1469 (size=7280644) 2024-12-07T00:26:08,439 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742294_1470 (size=4188619) 2024-12-07T00:26:08,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742294_1470 (size=4188619) 2024-12-07T00:26:08,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742294_1470 (size=4188619) 2024-12-07T00:26:08,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742295_1471 (size=20406) 2024-12-07T00:26:08,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742295_1471 (size=20406) 2024-12-07T00:26:08,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742295_1471 (size=20406) 2024-12-07T00:26:08,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742296_1472 (size=75495) 2024-12-07T00:26:08,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742296_1472 (size=75495) 2024-12-07T00:26:08,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33501 is added to blk_1073742296_1472 (size=75495) 2024-12-07T00:26:08,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742297_1473 (size=45609) 2024-12-07T00:26:08,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742297_1473 (size=45609) 2024-12-07T00:26:08,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742297_1473 (size=45609) 2024-12-07T00:26:08,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742298_1474 (size=6350154) 2024-12-07T00:26:08,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742298_1474 (size=6350154) 2024-12-07T00:26:08,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742298_1474 (size=6350154) 2024-12-07T00:26:08,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742299_1475 (size=110084) 2024-12-07T00:26:08,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742299_1475 (size=110084) 2024-12-07T00:26:08,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742299_1475 (size=110084) 2024-12-07T00:26:08,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742300_1476 (size=1323991) 2024-12-07T00:26:08,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742300_1476 (size=1323991) 2024-12-07T00:26:08,987 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742300_1476 (size=1323991) 2024-12-07T00:26:09,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742301_1477 (size=23076) 2024-12-07T00:26:09,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742301_1477 (size=23076) 2024-12-07T00:26:09,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742301_1477 (size=23076) 2024-12-07T00:26:09,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742302_1478 (size=126803) 2024-12-07T00:26:09,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742302_1478 (size=126803) 2024-12-07T00:26:09,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742302_1478 (size=126803) 2024-12-07T00:26:09,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742303_1479 (size=322274) 2024-12-07T00:26:09,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33501 is added to blk_1073742303_1479 (size=322274) 2024-12-07T00:26:09,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742303_1479 (size=322274) 2024-12-07T00:26:09,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742304_1480 (size=1832290) 2024-12-07T00:26:09,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742304_1480 (size=1832290) 2024-12-07T00:26:09,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742304_1480 (size=1832290) 2024-12-07T00:26:09,088 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742305_1481 (size=451756) 2024-12-07T00:26:09,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742305_1481 (size=451756) 2024-12-07T00:26:09,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742305_1481 (size=451756) 2024-12-07T00:26:09,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742306_1482 (size=30081) 2024-12-07T00:26:09,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742306_1482 (size=30081) 2024-12-07T00:26:09,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742306_1482 (size=30081) 2024-12-07T00:26:09,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742307_1483 (size=53616) 2024-12-07T00:26:09,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742307_1483 (size=53616) 2024-12-07T00:26:09,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742307_1483 (size=53616) 2024-12-07T00:26:09,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742308_1484 (size=29229) 2024-12-07T00:26:09,168 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742308_1484 (size=29229) 2024-12-07T00:26:09,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742308_1484 (size=29229) 2024-12-07T00:26:09,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742309_1485 (size=169089) 2024-12-07T00:26:09,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742309_1485 (size=169089) 2024-12-07T00:26:09,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742309_1485 (size=169089) 2024-12-07T00:26:09,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:33385 is added to blk_1073742310_1486 (size=5175431) 2024-12-07T00:26:09,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742310_1486 (size=5175431) 2024-12-07T00:26:09,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742310_1486 (size=5175431) 2024-12-07T00:26:09,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742311_1487 (size=136454) 2024-12-07T00:26:09,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742311_1487 (size=136454) 2024-12-07T00:26:09,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742311_1487 (size=136454) 2024-12-07T00:26:09,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742312_1488 (size=907851) 2024-12-07T00:26:09,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742312_1488 (size=907851) 2024-12-07T00:26:09,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742312_1488 (size=907851) 2024-12-07T00:26:09,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742313_1489 (size=3317408) 2024-12-07T00:26:09,317 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742313_1489 (size=3317408) 2024-12-07T00:26:09,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742313_1489 (size=3317408) 2024-12-07T00:26:09,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742314_1490 (size=503880) 2024-12-07T00:26:09,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742314_1490 (size=503880) 2024-12-07T00:26:09,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742314_1490 (size=503880) 2024-12-07T00:26:09,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742315_1491 (size=4695811) 2024-12-07T00:26:09,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742315_1491 (size=4695811) 2024-12-07T00:26:09,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742315_1491 (size=4695811) 2024-12-07T00:26:09,400 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
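The JobResourceUploader warning just above ("No job jar file set... See Job or Job#setJar(String).") is the stock MapReduce reminder that the client never pointed the job at a jar, so task containers may not find user classes. As the message suggests, the usual fix is to set the jar on the Job; a hypothetical sketch follows, where the class used to locate the jar is purely illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.Job;

    public final class JobJarExample {
      static Job newJob(Configuration conf) throws Exception {
        Job job = Job.getInstance(conf, "export-snapshot-copy");
        // Either name an explicit jar path...
        // job.setJar("/path/to/my-job.jar");
        // ...or derive it from any class packaged inside that jar:
        job.setJarByClass(JobJarExample.class);
        return job;
      }
    }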
2024-12-07T00:26:09,403 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-12-07T00:26:09,405 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T00:26:09,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742316_1492 (size=338) 2024-12-07T00:26:09,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742316_1492 (size=338) 2024-12-07T00:26:09,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742316_1492 (size=338) 2024-12-07T00:26:09,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742317_1493 (size=15) 2024-12-07T00:26:09,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742317_1493 (size=15) 2024-12-07T00:26:09,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742317_1493 (size=15) 2024-12-07T00:26:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742318_1494 (size=304877) 2024-12-07T00:26:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742318_1494 (size=304877) 2024-12-07T00:26:09,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742318_1494 (size=304877) 2024-12-07T00:26:11,456 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:26:11,456 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T00:26:11,461 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0008_000001 (auth:SIMPLE) from 127.0.0.1:53800 2024-12-07T00:26:11,472 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000001/launch_container.sh] 2024-12-07T00:26:11,472 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000001/container_tokens] 2024-12-07T00:26:11,472 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_2/usercache/jenkins/appcache/application_1733530972728_0008/container_1733530972728_0008_01_000001/sysfs] 2024-12-07T00:26:12,341 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0009_000001 (auth:SIMPLE) from 127.0.0.1:35004 2024-12-07T00:26:12,674 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T00:26:17,527 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0009_000001 (auth:SIMPLE) from 127.0.0.1:53460 2024-12-07T00:26:17,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742319_1495 (size=350551) 2024-12-07T00:26:17,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742319_1495 (size=350551) 2024-12-07T00:26:17,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742319_1495 (size=350551) 2024-12-07T00:26:19,107 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region 58d6740d4bd1a583fa779ac09c4b1424, had cached 0 bytes from a total of 8258 2024-12-07T00:26:19,107 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region dad33e11c0c4b25636f80c47867e4fd5, had cached 0 bytes from a total of 5356 2024-12-07T00:26:19,838 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0009_000001 (auth:SIMPLE) from 127.0.0.1:56690 2024-12-07T00:26:23,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742320_1496 (size=8258) 2024-12-07T00:26:23,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742320_1496 (size=8258) 2024-12-07T00:26:23,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742320_1496 (size=8258) 2024-12-07T00:26:23,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742321_1497 (size=5356) 2024-12-07T00:26:23,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742321_1497 (size=5356) 2024-12-07T00:26:23,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742321_1497 (size=5356) 2024-12-07T00:26:23,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742322_1498 (size=17413) 2024-12-07T00:26:23,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742322_1498 (size=17413) 2024-12-07T00:26:23,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742322_1498 (size=17413) 2024-12-07T00:26:23,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742323_1499 (size=462) 2024-12-07T00:26:23,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742323_1499 (size=462) 2024-12-07T00:26:23,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742323_1499 (size=462) 2024-12-07T00:26:23,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:33385 is added to blk_1073742324_1500 (size=17413) 2024-12-07T00:26:23,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742324_1500 (size=17413) 2024-12-07T00:26:23,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742324_1500 (size=17413) 2024-12-07T00:26:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742325_1501 (size=350551) 2024-12-07T00:26:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742325_1501 (size=350551) 2024-12-07T00:26:24,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742325_1501 (size=350551) 2024-12-07T00:26:24,017 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0009_000001 (auth:SIMPLE) from 127.0.0.1:46220 2024-12-07T00:26:25,627 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:26:25,628 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 2024-12-07T00:26:25,634 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportWithChecksum 2024-12-07T00:26:25,634 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:26:25,634 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:26:25,634 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-07T00:26:25,635 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-07T00:26:25,635 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-07T00:26:25,635 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-12-07T00:26:25,635 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-12-07T00:26:25,635 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531167081/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-12-07T00:26:25,640 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportWithChecksum 2024-12-07T00:26:25,641 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-12-07T00:26:25,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportWithChecksum 2024-12-07T00:26:25,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-07T00:26:25,643 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531185643"}]},"ts":"1733531185643"} 2024-12-07T00:26:25,645 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-12-07T00:26:25,647 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-12-07T00:26:25,647 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-12-07T00:26:25,648 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dad33e11c0c4b25636f80c47867e4fd5, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=58d6740d4bd1a583fa779ac09c4b1424, UNASSIGN}] 2024-12-07T00:26:25,649 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dad33e11c0c4b25636f80c47867e4fd5, UNASSIGN 2024-12-07T00:26:25,649 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=58d6740d4bd1a583fa779ac09c4b1424, UNASSIGN 2024-12-07T00:26:25,650 INFO [PEWorker-3 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=dad33e11c0c4b25636f80c47867e4fd5, regionState=CLOSING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:25,650 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=58d6740d4bd1a583fa779ac09c4b1424, regionState=CLOSING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:25,651 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:26:25,651 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=195, ppid=193, state=RUNNABLE; CloseRegionProcedure dad33e11c0c4b25636f80c47867e4fd5, server=6e6711bf2d40,33095,1733530964767}] 
2024-12-07T00:26:25,651 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:26:25,652 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=196, ppid=194, state=RUNNABLE; CloseRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:26:25,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-07T00:26:25,803 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:25,803 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(124): Close dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:26:25,803 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:26:25,803 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1681): Closing dad33e11c0c4b25636f80c47867e4fd5, disabling compactions & flushes 2024-12-07T00:26:25,803 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:26:25,803 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:25,804 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:26:25,804 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. after waiting 0 ms 2024-12-07T00:26:25,804 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:26:25,804 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(124): Close 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:26:25,804 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:26:25,804 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1681): Closing 58d6740d4bd1a583fa779ac09c4b1424, disabling compactions & flushes 2024-12-07T00:26:25,804 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1703): Closing region testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 
2024-12-07T00:26:25,804 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:26:25,804 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. after waiting 0 ms 2024-12-07T00:26:25,804 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 2024-12-07T00:26:25,808 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:26:25,808 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:26:25,809 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5. 2024-12-07T00:26:25,809 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1635): Region close journal for dad33e11c0c4b25636f80c47867e4fd5: 2024-12-07T00:26:25,809 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:26:25,809 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:26:25,809 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1922): Closed testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424. 
2024-12-07T00:26:25,809 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1635): Region close journal for 58d6740d4bd1a583fa779ac09c4b1424: 2024-12-07T00:26:25,810 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(170): Closed dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:26:25,811 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=193 updating hbase:meta row=dad33e11c0c4b25636f80c47867e4fd5, regionState=CLOSED 2024-12-07T00:26:25,811 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(170): Closed 58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:26:25,811 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=194 updating hbase:meta row=58d6740d4bd1a583fa779ac09c4b1424, regionState=CLOSED 2024-12-07T00:26:25,813 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=195, resume processing ppid=193 2024-12-07T00:26:25,814 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=195, ppid=193, state=SUCCESS; CloseRegionProcedure dad33e11c0c4b25636f80c47867e4fd5, server=6e6711bf2d40,33095,1733530964767 in 161 msec 2024-12-07T00:26:25,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=196, resume processing ppid=194 2024-12-07T00:26:25,814 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=196, ppid=194, state=SUCCESS; CloseRegionProcedure 58d6740d4bd1a583fa779ac09c4b1424, server=6e6711bf2d40,43281,1733530964846 in 160 msec 2024-12-07T00:26:25,815 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=193, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=dad33e11c0c4b25636f80c47867e4fd5, UNASSIGN in 166 msec 2024-12-07T00:26:25,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=194, resume processing ppid=192 2024-12-07T00:26:25,816 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=194, ppid=192, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=58d6740d4bd1a583fa779ac09c4b1424, UNASSIGN in 166 msec 2024-12-07T00:26:25,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=192, resume processing ppid=191 2024-12-07T00:26:25,818 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=192, ppid=191, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 170 msec 2024-12-07T00:26:25,819 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531185819"}]},"ts":"1733531185819"} 2024-12-07T00:26:25,820 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-12-07T00:26:25,822 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-12-07T00:26:25,824 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=191, state=SUCCESS; DisableTableProcedure table=testtb-testExportWithChecksum in 181 msec 2024-12-07T00:26:25,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=191 2024-12-07T00:26:25,945 
INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum, procId: 191 completed 2024-12-07T00:26:25,945 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-12-07T00:26:25,946 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T00:26:25,947 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T00:26:25,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportWithChecksum 2024-12-07T00:26:25,948 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T00:26:25,949 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-12-07T00:26:25,951 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:26:25,951 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424 2024-12-07T00:26:25,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T00:26:25,952 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T00:26:25,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T00:26:25,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T00:26:25,953 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-07T00:26:25,954 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-07T00:26:25,954 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/recovered.edits] 2024-12-07T00:26:25,954 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-07T00:26:25,954 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-12-07T00:26:25,955 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/recovered.edits] 2024-12-07T00:26:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T00:26:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T00:26:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T00:26:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-12-07T00:26:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:25,956 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:25,957 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-07T00:26:25,958 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:25,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:25,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:25,958 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:25,959 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/cf/b309ca5796414a27a69985ffae0926e4 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/cf/b309ca5796414a27a69985ffae0926e4 2024-12-07T00:26:25,960 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/cf/4ee9c027930748ceb00d3cb4fc4d00ef 2024-12-07T00:26:25,962 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5/recovered.edits/9.seqid 2024-12-07T00:26:25,963 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/dad33e11c0c4b25636f80c47867e4fd5 2024-12-07T00:26:25,963 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424/recovered.edits/9.seqid 2024-12-07T00:26:25,964 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportWithChecksum/58d6740d4bd1a583fa779ac09c4b1424 
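The HFileArchiver records above show that dropping the table does not delete store files outright: each store file and recovered-edits file is moved into an archive tree that mirrors the data layout (data/default/<table>/<region>/cf/<file> becomes archive/data/default/<table>/<region>/cf/<file>). The helper below only illustrates that path mirroring; it is a hypothetical stand-in, not HBase's internal HFileArchiver/HFileArchiveUtil logic.

    import org.apache.hadoop.fs.Path;

    public class ArchivePathSketch {
      // Illustrative only: rebase a store-file path under <root>/archive,
      // keeping everything from the "data/" component onward.
      static Path toArchivePath(Path rootDir, Path storeFile) {
        String relative = storeFile.toUri().getPath().replaceFirst(".*/data/", "data/");
        return new Path(new Path(rootDir, "archive"), relative);
      }

      public static void main(String[] args) {
        Path root = new Path("hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45");
        Path hfile = new Path(root, "data/default/testtb-testExportWithChecksum/"
            + "dad33e11c0c4b25636f80c47867e4fd5/cf/b309ca5796414a27a69985ffae0926e4");
        // Prints the same archive location seen in the HFileArchiver(596) record above.
        System.out.println(toArchivePath(root, hfile));
      }
    }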
2024-12-07T00:26:25,964 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-12-07T00:26:25,966 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T00:26:25,976 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-12-07T00:26:25,978 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-12-07T00:26:25,979 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T00:26:25,979 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-12-07T00:26:25,979 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531185979"}]},"ts":"9223372036854775807"} 2024-12-07T00:26:25,979 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531185979"}]},"ts":"9223372036854775807"} 2024-12-07T00:26:25,981 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:26:25,981 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => dad33e11c0c4b25636f80c47867e4fd5, NAME => 'testtb-testExportWithChecksum,,1733531133332.dad33e11c0c4b25636f80c47867e4fd5.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 58d6740d4bd1a583fa779ac09c4b1424, NAME => 'testtb-testExportWithChecksum,1,1733531133332.58d6740d4bd1a583fa779ac09c4b1424.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:26:25,982 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
2024-12-07T00:26:25,982 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531185982"}]},"ts":"9223372036854775807"} 2024-12-07T00:26:25,983 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportWithChecksum state from META 2024-12-07T00:26:25,985 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-12-07T00:26:25,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=197, state=SUCCESS; DeleteTableProcedure table=testtb-testExportWithChecksum in 39 msec 2024-12-07T00:26:26,058 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=197 2024-12-07T00:26:26,058 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum, procId: 197 completed 2024-12-07T00:26:26,065 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" 2024-12-07T00:26:26,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-12-07T00:26:26,068 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" 2024-12-07T00:26:26,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportWithChecksum 2024-12-07T00:26:26,096 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportWithChecksum Thread=797 (was 804), OpenFileDescriptor=792 (was 802), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=489 (was 608), ProcessCount=17 (was 17), AvailableMemoryMB=2673 (was 3037) 2024-12-07T00:26:26,096 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-07T00:26:26,116 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=797, OpenFileDescriptor=792, MaxFileDescriptor=1048576, SystemLoadAverage=489, ProcessCount=17, AvailableMemoryMB=2672 2024-12-07T00:26:26,116 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-12-07T00:26:26,117 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster$4(2389): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1'}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-12-07T00:26:26,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 
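The teardown above finishes with DeleteTableProcedure pid=197 and then removes the two snapshots that referenced the dropped table ("emptySnaptb0-testExportWithChecksum" and "snaptb0-testExportWithChecksum"). A client-side sketch of the same calls, under the same assumptions as the earlier snippet (HBase 2.x Admin API, default local configuration):

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTableAndSnapshotsSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportWithChecksum");
          // The table must already be disabled; deleteTable runs the master's
          // DeleteTableProcedure, which archives the region directories first.
          admin.deleteTable(table);
          // Snapshots live independently of the table and are removed separately.
          admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
          admin.deleteSnapshot("snaptb0-testExportWithChecksum");
        }
      }
    }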
2024-12-07T00:26:26,123 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-12-07T00:26:26,123 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:26:26,123 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(713): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 198 2024-12-07T00:26:26,124 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-12-07T00:26:26,125 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-07T00:26:26,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742326_1502 (size=418) 2024-12-07T00:26:26,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742326_1502 (size=418) 2024-12-07T00:26:26,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742326_1502 (size=418) 2024-12-07T00:26:26,141 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7106): creating {ENCODED => f4fb4bb36db4edca7b6a0825c61f95de, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:26:26,141 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7106): creating {ENCODED => 2be57a594b760c2bdfd27394b33fb2a7, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:26:26,165 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742328_1504 (size=79) 2024-12-07T00:26:26,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742328_1504 (size=79) 2024-12-07T00:26:26,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742328_1504 (size=79) 2024-12-07T00:26:26,166 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:26:26,166 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1681): Closing f4fb4bb36db4edca7b6a0825c61f95de, disabling compactions & flushes 2024-12-07T00:26:26,166 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:26,167 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:26,167 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. after waiting 0 ms 2024-12-07T00:26:26,167 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:26,167 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 
2024-12-07T00:26:26,167 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1635): Region close journal for f4fb4bb36db4edca7b6a0825c61f95de: 2024-12-07T00:26:26,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742327_1503 (size=79) 2024-12-07T00:26:26,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742327_1503 (size=79) 2024-12-07T00:26:26,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742327_1503 (size=79) 2024-12-07T00:26:26,176 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:26:26,176 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1681): Closing 2be57a594b760c2bdfd27394b33fb2a7, disabling compactions & flushes 2024-12-07T00:26:26,176 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:26,176 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:26,176 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. after waiting 0 ms 2024-12-07T00:26:26,176 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:26,176 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 
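The descriptor logged for testtb-testExportFileSystemStateWithSkipTmp has a single 'cf' family (VERSIONS => '1', no compression or encoding) and the table is created with two regions split at row key '1'. A roughly equivalent pre-split createTable call, sketched with the HBase 2.x client API under the same assumptions as the snippets above:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreatePreSplitTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor desc = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)  // VERSIONS => '1' in the logged descriptor
                  .build())
              .build();
          // One split key yields the two regions seen in the RegionOpenAndInit
          // records: ['', '1') and ['1', '').
          byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
          admin.createTable(desc, splitKeys);
        }
      }
    }

Pre-splitting at creation time is what produces the two regions the log assigns to the two region servers a few records later.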
2024-12-07T00:26:26,176 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1635): Region close journal for 2be57a594b760c2bdfd27394b33fb2a7: 2024-12-07T00:26:26,184 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-12-07T00:26:26,184 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733531186184"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531186184"}]},"ts":"1733531186184"} 2024-12-07T00:26:26,184 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1733531186184"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1733531186184"}]},"ts":"1733531186184"} 2024-12-07T00:26:26,186 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1516): Added 2 regions to meta. 2024-12-07T00:26:26,187 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-12-07T00:26:26,187 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531186187"}]},"ts":"1733531186187"} 2024-12-07T00:26:26,188 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-12-07T00:26:26,192 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(202): Hosts are {6e6711bf2d40=0} racks are {/default-rack=0} 2024-12-07T00:26:26,193 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 is on host 0 2024-12-07T00:26:26,193 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 is on host 0 2024-12-07T00:26:26,193 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 is on host 0 2024-12-07T00:26:26,193 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 0 is on rack 0 2024-12-07T00:26:26,193 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 1 is on rack 0 2024-12-07T00:26:26,193 INFO [PEWorker-1 {}] balancer.BalancerClusterState(314): server 2 is on rack 0 2024-12-07T00:26:26,194 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(319): Number of tables=1, number of hosts=1, number of racks=1 2024-12-07T00:26:26,194 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f4fb4bb36db4edca7b6a0825c61f95de, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2be57a594b760c2bdfd27394b33fb2a7, ASSIGN}] 2024-12-07T00:26:26,195 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=199, 
ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f4fb4bb36db4edca7b6a0825c61f95de, ASSIGN 2024-12-07T00:26:26,196 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(264): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f4fb4bb36db4edca7b6a0825c61f95de, ASSIGN; state=OFFLINE, location=6e6711bf2d40,33095,1733530964767; forceNewPlan=false, retain=false 2024-12-07T00:26:26,197 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2be57a594b760c2bdfd27394b33fb2a7, ASSIGN 2024-12-07T00:26:26,197 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(264): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, locked=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2be57a594b760c2bdfd27394b33fb2a7, ASSIGN; state=OFFLINE, location=6e6711bf2d40,43281,1733530964846; forceNewPlan=false, retain=false 2024-12-07T00:26:26,226 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-07T00:26:26,346 INFO [6e6711bf2d40:35025 {}] balancer.BaseLoadBalancer(546): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-12-07T00:26:26,347 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=f4fb4bb36db4edca7b6a0825c61f95de, regionState=OPENING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:26,347 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=2be57a594b760c2bdfd27394b33fb2a7, regionState=OPENING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:26,348 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=201, ppid=200, state=RUNNABLE; OpenRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:26:26,349 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=202, ppid=199, state=RUNNABLE; OpenRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:26:26,427 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-07T00:26:26,500 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:26,501 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:26,503 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 
2024-12-07T00:26:26,503 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7285): Opening region: {ENCODED => 2be57a594b760c2bdfd27394b33fb2a7, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.', STARTKEY => '1', ENDKEY => ''} 2024-12-07T00:26:26,503 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(135): Open testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:26,503 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. service=AccessControlService 2024-12-07T00:26:26,503 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7285): Opening region: {ENCODED => f4fb4bb36db4edca7b6a0825c61f95de, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.', STARTKEY => '', ENDKEY => '1'} 2024-12-07T00:26:26,503 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7999): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. service=AccessControlService 2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:26:26,504 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7327): checking encryption for 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7330): checking classloading for 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(894): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7327): checking encryption for f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,504 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7330): checking classloading for f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,506 INFO [StoreOpener-2be57a594b760c2bdfd27394b33fb2a7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,506 INFO [StoreOpener-f4fb4bb36db4edca7b6a0825c61f95de-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,507 INFO [StoreOpener-f4fb4bb36db4edca7b6a0825c61f95de-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region f4fb4bb36db4edca7b6a0825c61f95de columnFamilyName cf 2024-12-07T00:26:26,507 INFO [StoreOpener-2be57a594b760c2bdfd27394b33fb2a7-1 {}] compactions.CompactionConfiguration(181): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2be57a594b760c2bdfd27394b33fb2a7 columnFamilyName cf 2024-12-07T00:26:26,507 DEBUG [StoreOpener-2be57a594b760c2bdfd27394b33fb2a7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:26:26,507 DEBUG [StoreOpener-f4fb4bb36db4edca7b6a0825c61f95de-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-12-07T00:26:26,508 INFO [StoreOpener-2be57a594b760c2bdfd27394b33fb2a7-1 {}] regionserver.HStore(327): Store=2be57a594b760c2bdfd27394b33fb2a7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:26:26,508 INFO [StoreOpener-f4fb4bb36db4edca7b6a0825c61f95de-1 {}] regionserver.HStore(327): Store=f4fb4bb36db4edca7b6a0825c61f95de/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-12-07T00:26:26,509 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,509 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,509 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,509 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5301): Found 0 recovered edits file(s) under hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,511 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1085): writing seq id for 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,511 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1085): writing seq id for f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,512 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 
2024-12-07T00:26:26,512 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-12-07T00:26:26,513 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1102): Opened 2be57a594b760c2bdfd27394b33fb2a7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60469019, jitterRate=-0.09894140064716339}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:26:26,513 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1102): Opened f4fb4bb36db4edca7b6a0825c61f95de; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=61239489, jitterRate=-0.08746050298213959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-12-07T00:26:26,513 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1001): Region open journal for 2be57a594b760c2bdfd27394b33fb2a7: 2024-12-07T00:26:26,514 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1001): Region open journal for f4fb4bb36db4edca7b6a0825c61f95de: 2024-12-07T00:26:26,514 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7., pid=201, masterSystemTime=1733531186500 2024-12-07T00:26:26,514 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2601): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de., pid=202, masterSystemTime=1733531186501 2024-12-07T00:26:26,515 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:26,516 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:26,516 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=200 updating hbase:meta row=2be57a594b760c2bdfd27394b33fb2a7, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:26,516 DEBUG [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2628): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:26,516 INFO [RS_OPEN_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(164): Opened testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 
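After both OpenRegionProcedures report back, the master marks the regions OPEN in hbase:meta and completes the create procedure; the test then waits until every region is assigned before touching the table. A client can perform a similar readiness check with isTableAvailable; the polling loop below is an illustrative sketch, not the HBaseTestingUtility helper the test itself uses.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
          long deadline = System.currentTimeMillis() + 60_000;  // 60s, mirroring the test's timeout
          while (!admin.isTableAvailable(table)) {
            if (System.currentTimeMillis() > deadline) {
              throw new IllegalStateException("table not available within 60s");
            }
            Thread.sleep(100);
          }
        }
      }
    }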
2024-12-07T00:26:26,518 INFO [PEWorker-2 {}] assignment.RegionStateStore(202): pid=199 updating hbase:meta row=f4fb4bb36db4edca7b6a0825c61f95de, regionState=OPEN, openSeqNum=2, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:26,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=201, resume processing ppid=200 2024-12-07T00:26:26,520 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=201, ppid=200, state=SUCCESS; OpenRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7, server=6e6711bf2d40,43281,1733530964846 in 169 msec 2024-12-07T00:26:26,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=202, resume processing ppid=199 2024-12-07T00:26:26,521 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=200, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2be57a594b760c2bdfd27394b33fb2a7, ASSIGN in 326 msec 2024-12-07T00:26:26,521 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=202, ppid=199, state=SUCCESS; OpenRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de, server=6e6711bf2d40,33095,1733530964767 in 170 msec 2024-12-07T00:26:26,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=199, resume processing ppid=198 2024-12-07T00:26:26,522 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=199, ppid=198, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f4fb4bb36db4edca7b6a0825c61f95de, ASSIGN in 327 msec 2024-12-07T00:26:26,523 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-12-07T00:26:26,523 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531186523"}]},"ts":"1733531186523"} 2024-12-07T00:26:26,524 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-12-07T00:26:26,527 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, locked=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-12-07T00:26:26,527 DEBUG [PEWorker-5 {}] access.PermissionStorage(175): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-12-07T00:26:26,529 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-07T00:26:26,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:26,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:26,538 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:26,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:26,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:26,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:26,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:26,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:26,540 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:26,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:26,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:26,541 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:26,542 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=198, state=SUCCESS; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 422 msec 2024-12-07T00:26:26,728 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=198 2024-12-07T00:26:26,728 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 198 completed 2024-12-07T00:26:26,728 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(3531): Waiting until all regions of table 
testtb-testExportFileSystemStateWithSkipTmp get assigned. Timeout = 60000ms 2024-12-07T00:26:26,728 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:26:26,731 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43281 {}] regionserver.StoreScanner(1133): Switch to stream read (scanned=32795 bytes) of info 2024-12-07T00:26:26,741 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3585): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned to meta. Checking AM states. 2024-12-07T00:26:26,741 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:26:26,741 INFO [Time-limited test {}] hbase.HBaseTestingUtility(3605): All regions for table testtb-testExportFileSystemStateWithSkipTmp assigned. 2024-12-07T00:26:26,743 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-07T00:26:26,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531186743 (current time:1733531186743). 2024-12-07T00:26:26,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:26:26,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-07T00:26:26,744 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:26:26,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x26535b62 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@58331c8e 2024-12-07T00:26:26,748 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4502c26e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:26:26,749 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:26:26,750 INFO [RS-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44556, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:26:26,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x26535b62 to 127.0.0.1:60354 2024-12-07T00:26:26,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:26:26,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x388480c1 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, 
retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@78932c33 2024-12-07T00:26:26,758 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b5180fd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:26:26,759 DEBUG [hconnection-0x2dfc1dda-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:26:26,760 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44568, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:26:26,762 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:26:26,763 INFO [RS-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40372, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:26:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x388480c1 to 127.0.0.1:60354 2024-12-07T00:26:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:26:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-07T00:26:26,764 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-07T00:26:26,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-07T00:26:26,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-07T00:26:26,766 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:26:26,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-07T00:26:26,767 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:26:26,769 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:26:26,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742329_1505 (size=203) 2024-12-07T00:26:26,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742329_1505 (size=203) 2024-12-07T00:26:26,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742329_1505 (size=203) 2024-12-07T00:26:26,778 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:26:26,778 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de}, {pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7}] 2024-12-07T00:26:26,779 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,779 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 
f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-07T00:26:26,930 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:26,930 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:26,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-12-07T00:26:26,930 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2538): Flush status journal for f4fb4bb36db4edca7b6a0825c61f95de: 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2538): Flush status journal for 2be57a594b760c2bdfd27394b33fb2a7: 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:26:26,931 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-12-07T00:26:26,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742331_1507 (size=82) 2024-12-07T00:26:26,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742331_1507 (size=82) 2024-12-07T00:26:26,943 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742330_1506 (size=82) 2024-12-07T00:26:26,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742330_1506 (size=82) 2024-12-07T00:26:26,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742331_1507 (size=82) 2024-12-07T00:26:26,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742330_1506 (size=82) 2024-12-07T00:26:26,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:26,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 
2024-12-07T00:26:26,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-12-07T00:26:26,944 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-12-07T00:26:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=204 2024-12-07T00:26:26,945 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,945 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=205 2024-12-07T00:26:26,945 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,945 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=204, ppid=203, state=RUNNABLE; SnapshotRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:26,945 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=205, ppid=203, state=RUNNABLE; SnapshotRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:26,948 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=205, ppid=203, state=SUCCESS; SnapshotRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7 in 169 msec 2024-12-07T00:26:26,949 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=204, resume processing ppid=203 2024-12-07T00:26:26,949 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=204, ppid=203, state=SUCCESS; SnapshotRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de in 169 msec 2024-12-07T00:26:26,949 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:26:26,950 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:26:26,951 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:26:26,951 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:26,952 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions 
under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:26,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742332_1508 (size=585) 2024-12-07T00:26:26,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742332_1508 (size=585) 2024-12-07T00:26:26,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742332_1508 (size=585) 2024-12-07T00:26:26,966 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:26:26,970 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:26:26,970 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:26,971 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:26:26,972 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-12-07T00:26:26,973 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=203, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 207 msec 2024-12-07T00:26:27,068 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=203 2024-12-07T00:26:27,068 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 203 completed 2024-12-07T00:26:27,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33095 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. with WAL disabled. Data may be lost in the event of a crash. 
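The entries above trace a complete FLUSH-type snapshot: the master validates the request, registers SnapshotProcedure pid=203, walks it through SNAPSHOT_PREPARE, SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, SNAPSHOT_CONSOLIDATE_SNAPSHOT and SNAPSHOT_COMPLETE_SNAPSHOT, and the client polls MasterRpcServices until procId 203 is reported complete. For orientation only, here is a minimal client-side sketch of how such a snapshot is requested through the public Admin API; this code is not part of the test, the snapshot and table names are copied from the log entries above, and a reachable cluster configuration (hbase-site.xml on the classpath) is assumed.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml points at the cluster
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // FLUSH-type snapshot: online regions are flushed and their hfiles
          // referenced, matching the "type=FLUSH ttl=0" descriptor in the log.
          admin.snapshot(new SnapshotDescription(
              "snaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"),
              SnapshotType.FLUSH));
        }
      }
    }

Admin.snapshot blocks until the snapshot procedure finishes, which corresponds to the repeated "Checking to see if procedure is done" polling visible in the log.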
2024-12-07T00:26:27,075 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43281 {}] regionserver.HRegion(8254): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. with WAL disabled. Data may be lost in the event of a crash. 2024-12-07T00:26:27,078 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2718): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,078 DEBUG [Time-limited test {}] hbase.HBaseTestingUtility(2724): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:27,079 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-12-07T00:26:27,090 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1703): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-07T00:26:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(331): Creation time not specified, setting to:1733531187090 (current time:1733531187090). 2024-12-07T00:26:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(345): Snapshot current TTL value: 0 resetting it to default value: 0 2024-12-07T00:26:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(354): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-12-07T00:26:27,090 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotDescriptionUtils(361): Set jenkins as owner of Snapshot 2024-12-07T00:26:27,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x34e6a779 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client config=org.apache.zookeeper.client.ZKClientConfig@c668fcb 2024-12-07T00:26:27,095 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e6e9d31, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:26:27,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:26:27,098 INFO [RS-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44574, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:26:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x34e6a779 to 127.0.0.1:60354 2024-12-07T00:26:27,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:26:27,100 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(149): Connect 0x52b38bc4 to 127.0.0.1:60354 with session timeout=90000ms, retries=30, retry interval=1000ms, keepAlive=60000ms, zk client 
config=org.apache.zookeeper.client.ZKClientConfig@6a934e8e 2024-12-07T00:26:27,103 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@403ed1f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-12-07T00:26:27,105 DEBUG [hconnection-0x6ae6e08b-metaLookup-shared--pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:26:27,106 INFO [RS-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44576, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:26:27,107 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-12-07T00:26:27,108 INFO [RS-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40378, version=2.7.0-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-12-07T00:26:27,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x52b38bc4 to 127.0.0.1:60354 2024-12-07T00:26:27,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:26:27,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] access.PermissionStorage(611): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-12-07T00:26:27,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotManager(806): No existing snapshot, attempting snapshot... 
2024-12-07T00:26:27,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-12-07T00:26:27,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotManager(1441): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-07T00:26:27,112 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-12-07T00:26:27,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-07T00:26:27,112 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-12-07T00:26:27,114 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-12-07T00:26:27,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742333_1509 (size=198) 2024-12-07T00:26:27,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742333_1509 (size=198) 2024-12-07T00:26:27,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742333_1509 (size=198) 2024-12-07T00:26:27,125 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-12-07T00:26:27,125 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de}, {pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7}] 2024-12-07T00:26:27,126 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:27,126 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de 
2024-12-07T00:26:27,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-07T00:26:27,277 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:27,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43281 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-12-07T00:26:27,278 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:27,278 DEBUG [RSProcedureDispatcher-pool-1 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:27,278 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2837): Flushing 2be57a594b760c2bdfd27394b33fb2a7 1/1 column families, dataSize=2.80 KB heapSize=6.30 KB 2024-12-07T00:26:27,278 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=33095 {}] regionserver.RSRpcServices(3992): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-12-07T00:26:27,279 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:27,279 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2837): Flushing f4fb4bb36db4edca7b6a0825c61f95de 1/1 column families, dataSize=467 B heapSize=1.23 KB 2024-12-07T00:26:27,305 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/.tmp/cf/44ff2ac42a284d01a1099ec9529954cb is 71, key is 0132990ad1e6ca4bdd65cb759c4834ab/cf:q/1733531187075/Put/seqid=0 2024-12-07T00:26:27,324 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/.tmp/cf/97268340876b4d50ac985c72db94bce2 is 71, key is 1d4950b3ca82019d514e9a1322aa0a7e/cf:q/1733531187075/Put/seqid=0 2024-12-07T00:26:27,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742334_1510 (size=5566) 2024-12-07T00:26:27,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742334_1510 (size=5566) 2024-12-07T00:26:27,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742334_1510 (size=5566) 2024-12-07T00:26:27,334 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=467 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/.tmp/cf/44ff2ac42a284d01a1099ec9529954cb 2024-12-07T00:26:27,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/.tmp/cf/44ff2ac42a284d01a1099ec9529954cb as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/cf/44ff2ac42a284d01a1099ec9529954cb 2024-12-07T00:26:27,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742335_1511 (size=8054) 2024-12-07T00:26:27,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742335_1511 (size=8054) 2024-12-07T00:26:27,345 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/cf/44ff2ac42a284d01a1099ec9529954cb, entries=7, sequenceid=6, filesize=5.4 K 2024-12-07T00:26:27,346 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3040): Finished flush of dataSize ~467 B/467, heapSize ~1.22 KB/1248, currentSize=0 B/0 for f4fb4bb36db4edca7b6a0825c61f95de in 67ms, sequenceid=6, compaction requested=false 2024-12-07T00:26:27,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-12-07T00:26:27,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2538): Flush status journal for f4fb4bb36db4edca7b6a0825c61f95de: 2024-12-07T00:26:27,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-07T00:26:27,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:26:27,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/cf/44ff2ac42a284d01a1099ec9529954cb] hfiles 2024-12-07T00:26:27,348 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/cf/44ff2ac42a284d01a1099ec9529954cb for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742335_1511 (size=8054) 2024-12-07T00:26:27,349 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.80 KB at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/.tmp/cf/97268340876b4d50ac985c72db94bce2 2024-12-07T00:26:27,359 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/.tmp/cf/97268340876b4d50ac985c72db94bce2 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/cf/97268340876b4d50ac985c72db94bce2 2024-12-07T00:26:27,366 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/cf/97268340876b4d50ac985c72db94bce2, entries=43, sequenceid=6, filesize=7.9 K 2024-12-07T00:26:27,367 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3040): Finished flush of dataSize ~2.80 KB/2869, heapSize ~6.28 KB/6432, currentSize=0 B/0 for 2be57a594b760c2bdfd27394b33fb2a7 in 88ms, sequenceid=6, compaction requested=false 2024-12-07T00:26:27,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2538): Flush status journal for 2be57a594b760c2bdfd27394b33fb2a7: 2024-12-07T00:26:27,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region 
testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-12-07T00:26:27,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-12-07T00:26:27,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/cf/97268340876b4d50ac985c72db94bce2] hfiles 2024-12-07T00:26:27,367 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/cf/97268340876b4d50ac985c72db94bce2 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742336_1512 (size=121) 2024-12-07T00:26:27,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742336_1512 (size=121) 2024-12-07T00:26:27,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742336_1512 (size=121) 2024-12-07T00:26:27,376 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 
2024-12-07T00:26:27,376 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-12-07T00:26:27,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=207 2024-12-07T00:26:27,376 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:27,377 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=207, ppid=206, state=RUNNABLE; SnapshotRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:27,379 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=207, ppid=206, state=SUCCESS; SnapshotRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de in 253 msec 2024-12-07T00:26:27,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742337_1513 (size=121) 2024-12-07T00:26:27,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742337_1513 (size=121) 2024-12-07T00:26:27,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742337_1513 (size=121) 2024-12-07T00:26:27,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 
2024-12-07T00:26:27,386 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/6e6711bf2d40:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-12-07T00:26:27,386 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.HMaster(4106): Remote procedure done, pid=208 2024-12-07T00:26:27,386 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:27,386 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=208, ppid=206, state=RUNNABLE; SnapshotRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:27,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=208, resume processing ppid=206 2024-12-07T00:26:27,388 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=208, ppid=206, state=SUCCESS; SnapshotRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7 in 262 msec 2024-12-07T00:26:27,388 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-12-07T00:26:27,389 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-12-07T00:26:27,390 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-12-07T00:26:27,390 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,391 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742338_1514 (size=663) 2024-12-07T00:26:27,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742338_1514 (size=663) 2024-12-07T00:26:27,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742338_1514 (size=663) 2024-12-07T00:26:27,401 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp 
type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-12-07T00:26:27,406 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-12-07T00:26:27,406 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(430): Sentinel is done, just moving the snapshot from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,408 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, locked=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-12-07T00:26:27,408 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1447): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-12-07T00:26:27,409 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=206, state=SUCCESS; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 298 msec 2024-12-07T00:26:27,414 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=206 2024-12-07T00:26:27,414 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 206 completed 2024-12-07T00:26:27,414 INFO [Time-limited test {}] snapshot.TestExportSnapshot(468): HDFS export destination path: hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414 2024-12-07T00:26:27,414 INFO [Time-limited test {}] snapshot.TestExportSnapshot(495): tgtFsUri=hdfs://localhost:46157, tgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414, rawTgtDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414, srcFsUri=hdfs://localhost:46157, srcDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:26:27,459 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1082): inputFs=hdfs://localhost:46157, inputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45 2024-12-07T00:26:27,459 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1083): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414, skipTmp=true, 
initialOutputSnapshotDir=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,461 INFO [Time-limited test {}] snapshot.ExportSnapshot(1092): Verify the source snapshot's expiration status and integrity. 2024-12-07T00:26:27,466 INFO [Time-limited test {}] snapshot.ExportSnapshot(1150): Copy Snapshot Manifest from hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:27,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742339_1515 (size=198) 2024-12-07T00:26:27,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742339_1515 (size=198) 2024-12-07T00:26:27,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742339_1515 (size=198) 2024-12-07T00:26:27,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742340_1516 (size=663) 2024-12-07T00:26:27,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742340_1516 (size=663) 2024-12-07T00:26:27,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742340_1516 (size=663) 2024-12-07T00:26:27,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-common/target/hbase-common-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:27,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol/target/hbase-protocol-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:27,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-protocol-shaded/target/hbase-protocol-shaded-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:27,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-client/target/hbase-client-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:28,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-9856232722443911761.jar 2024-12-07T00:26:28,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop-compat/target/hbase-hadoop-compat-2.7.0-SNAPSHOT.jar 
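The ExportSnapshot entries above record the export parameters (inputRoot and outputRoot on the same hdfs://localhost:46157 filesystem, skipTmp=true, and an initialOutputSnapshotDir under the destination's .hbase-snapshot directory), after which TableMapReduceUtil resolves the dependency jars to ship with the MapReduce copy job. As a hedged illustration only, an equivalent export can be driven through Hadoop's ToolRunner; -snapshot and -copy-to mirror the values logged above, while the destination path here is purely a placeholder and the skip-tmp behaviour is assumed to be enabled by the test through configuration rather than through the arguments shown.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Copy the snapshot manifest and referenced hfiles to another root dir;
        // the destination URI below is a placeholder, not the path from the log.
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
            "-copy-to", "hdfs://localhost:46157/tmp/export-demo"});
        System.exit(rc);
      }
    }

Because skipTmp=true in this run, the export writes directly into .hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp on the destination (as the initialOutputSnapshotDir above shows) instead of staging under a .tmp directory first.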
2024-12-07T00:26:28,735 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-hadoop2-compat/target/hbase-hadoop2-compat-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:28,822 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop-1687851092687946885.jar 2024-12-07T00:26:28,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics/target/hbase-metrics-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:28,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-metrics-api/target/hbase-metrics-api-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:28,823 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-replication/target/hbase-replication-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:28,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-http/target/hbase-http-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:28,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-procedure/target/hbase-procedure-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:28,824 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-zookeeper/target/hbase-zookeeper-2.7.0-SNAPSHOT.jar 2024-12-07T00:26:28,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-12-07T00:26:28,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-12-07T00:26:28,825 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-12-07T00:26:28,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-12-07T00:26:28,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-12-07T00:26:28,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-12-07T00:26:28,826 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.google.protobuf.Message, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/com/google/protobuf/protobuf-java/2.5.0/protobuf-java-2.5.0.jar 2024-12-07T00:26:28,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-12-07T00:26:28,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-12-07T00:26:28,827 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-12-07T00:26:28,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-12-07T00:26:28,828 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-12-07T00:26:28,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:26:28,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:26:28,829 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:26:28,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class 
org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:26:28,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-12-07T00:26:28,830 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:26:28,831 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(923): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-12-07T00:26:28,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742341_1517 (size=127628) 2024-12-07T00:26:28,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742341_1517 (size=127628) 2024-12-07T00:26:28,937 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742341_1517 (size=127628) 2024-12-07T00:26:28,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742342_1518 (size=2172101) 2024-12-07T00:26:28,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742342_1518 (size=2172101) 2024-12-07T00:26:28,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742342_1518 (size=2172101) 2024-12-07T00:26:28,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742343_1519 (size=213228) 2024-12-07T00:26:28,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742343_1519 (size=213228) 2024-12-07T00:26:28,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742343_1519 (size=213228) 2024-12-07T00:26:28,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742344_1520 (size=1877034) 2024-12-07T00:26:28,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742344_1520 (size=1877034) 2024-12-07T00:26:28,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742344_1520 (size=1877034) 2024-12-07T00:26:29,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742345_1521 (size=6350154) 2024-12-07T00:26:29,010 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742345_1521 (size=6350154) 2024-12-07T00:26:29,010 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742345_1521 (size=6350154) 2024-12-07T00:26:29,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742346_1522 (size=533455) 2024-12-07T00:26:29,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742346_1522 (size=533455) 2024-12-07T00:26:29,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742346_1522 (size=533455) 2024-12-07T00:26:29,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742347_1523 (size=7280644) 2024-12-07T00:26:29,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742347_1523 (size=7280644) 2024-12-07T00:26:29,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742347_1523 (size=7280644) 2024-12-07T00:26:29,057 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0009/container_1733530972728_0009_01_000002/launch_container.sh] 2024-12-07T00:26:29,057 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0009/container_1733530972728_0009_01_000002/container_tokens] 2024-12-07T00:26:29,057 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_2/usercache/jenkins/appcache/application_1733530972728_0009/container_1733530972728_0009_01_000002/sysfs] 2024-12-07T00:26:29,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742348_1524 (size=451756) 2024-12-07T00:26:29,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742348_1524 (size=451756) 2024-12-07T00:26:29,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742348_1524 (size=451756) 2024-12-07T00:26:29,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 
is added to blk_1073742349_1525 (size=4188619) 2024-12-07T00:26:29,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742349_1525 (size=4188619) 2024-12-07T00:26:29,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742349_1525 (size=4188619) 2024-12-07T00:26:29,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742350_1526 (size=20406) 2024-12-07T00:26:29,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742350_1526 (size=20406) 2024-12-07T00:26:29,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742350_1526 (size=20406) 2024-12-07T00:26:29,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742351_1527 (size=75495) 2024-12-07T00:26:29,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742351_1527 (size=75495) 2024-12-07T00:26:29,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742351_1527 (size=75495) 2024-12-07T00:26:29,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742352_1528 (size=45609) 2024-12-07T00:26:29,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742352_1528 (size=45609) 2024-12-07T00:26:29,100 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742352_1528 (size=45609) 2024-12-07T00:26:29,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742353_1529 (size=110084) 2024-12-07T00:26:29,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742353_1529 (size=110084) 2024-12-07T00:26:29,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742353_1529 (size=110084) 2024-12-07T00:26:29,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742354_1530 (size=1323991) 2024-12-07T00:26:29,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742354_1530 (size=1323991) 2024-12-07T00:26:29,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742354_1530 (size=1323991) 2024-12-07T00:26:29,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742355_1531 (size=23076) 2024-12-07T00:26:29,137 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742355_1531 (size=23076) 2024-12-07T00:26:29,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 
is added to blk_1073742355_1531 (size=23076) 2024-12-07T00:26:29,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742356_1532 (size=126803) 2024-12-07T00:26:29,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742356_1532 (size=126803) 2024-12-07T00:26:29,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742356_1532 (size=126803) 2024-12-07T00:26:29,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742357_1533 (size=322274) 2024-12-07T00:26:29,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742357_1533 (size=322274) 2024-12-07T00:26:29,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742357_1533 (size=322274) 2024-12-07T00:26:29,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742358_1534 (size=1832290) 2024-12-07T00:26:29,191 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742358_1534 (size=1832290) 2024-12-07T00:26:29,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742358_1534 (size=1832290) 2024-12-07T00:26:29,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742359_1535 (size=30081) 2024-12-07T00:26:29,199 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742359_1535 (size=30081) 2024-12-07T00:26:29,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742359_1535 (size=30081) 2024-12-07T00:26:29,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742360_1536 (size=53616) 2024-12-07T00:26:29,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742360_1536 (size=53616) 2024-12-07T00:26:29,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742360_1536 (size=53616) 2024-12-07T00:26:29,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742361_1537 (size=29229) 2024-12-07T00:26:29,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742361_1537 (size=29229) 2024-12-07T00:26:29,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742361_1537 (size=29229) 2024-12-07T00:26:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742362_1538 (size=169089) 2024-12-07T00:26:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 
is added to blk_1073742362_1538 (size=169089) 2024-12-07T00:26:29,226 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742362_1538 (size=169089) 2024-12-07T00:26:29,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742363_1539 (size=5175431) 2024-12-07T00:26:29,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742363_1539 (size=5175431) 2024-12-07T00:26:29,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742363_1539 (size=5175431) 2024-12-07T00:26:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742364_1540 (size=136454) 2024-12-07T00:26:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742364_1540 (size=136454) 2024-12-07T00:26:29,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742364_1540 (size=136454) 2024-12-07T00:26:29,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742365_1541 (size=907851) 2024-12-07T00:26:29,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742365_1541 (size=907851) 2024-12-07T00:26:29,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742365_1541 (size=907851) 2024-12-07T00:26:29,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742366_1542 (size=3317408) 2024-12-07T00:26:29,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742366_1542 (size=3317408) 2024-12-07T00:26:29,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742366_1542 (size=3317408) 2024-12-07T00:26:29,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742367_1543 (size=503880) 2024-12-07T00:26:29,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742367_1543 (size=503880) 2024-12-07T00:26:29,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742367_1543 (size=503880) 2024-12-07T00:26:29,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742368_1544 (size=4695811) 2024-12-07T00:26:29,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742368_1544 (size=4695811) 2024-12-07T00:26:29,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742368_1544 (size=4695811) 2024-12-07T00:26:29,361 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. 
User classes may not be found. See Job or Job#setJar(String). 2024-12-07T00:26:29,364 INFO [Time-limited test {}] snapshot.ExportSnapshot(658): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-12-07T00:26:29,366 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(751): export split=0 size=13.3 K 2024-12-07T00:26:29,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742369_1545 (size=366) 2024-12-07T00:26:29,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742369_1545 (size=366) 2024-12-07T00:26:29,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742369_1545 (size=366) 2024-12-07T00:26:29,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742370_1546 (size=15) 2024-12-07T00:26:29,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742370_1546 (size=15) 2024-12-07T00:26:29,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742370_1546 (size=15) 2024-12-07T00:26:29,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742371_1547 (size=305053) 2024-12-07T00:26:29,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742371_1547 (size=305053) 2024-12-07T00:26:29,392 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742371_1547 (size=305053) 2024-12-07T00:26:30,097 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-12-07T00:26:30,097 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-12-07T00:26:30,103 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0009_000001 (auth:SIMPLE) from 127.0.0.1:46228 2024-12-07T00:26:30,114 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0009/container_1733530972728_0009_01_000001/launch_container.sh] 2024-12-07T00:26:30,115 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0009/container_1733530972728_0009_01_000001/container_tokens] 2024-12-07T00:26:30,115 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0009/container_1733530972728_0009_01_000001/sysfs] 2024-12-07T00:26:30,467 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0010_000001 (auth:SIMPLE) from 127.0.0.1:57132 2024-12-07T00:26:31,335 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:26:34,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:34,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-12-07T00:26:34,378 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-12-07T00:26:35,672 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0010_000001 (auth:SIMPLE) from 127.0.0.1:44674 2024-12-07T00:26:35,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742372_1548 (size=350751) 2024-12-07T00:26:35,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742372_1548 (size=350751) 2024-12-07T00:26:35,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742372_1548 (size=350751) 
2024-12-07T00:26:37,934 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0010_000001 (auth:SIMPLE) from 127.0.0.1:47406 2024-12-07T00:26:39,880 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:26:40,648 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region c613c7c0d4bf4a8124f2c33a8aabebf4, had cached 0 bytes from a total of 8460 2024-12-07T00:26:40,648 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(324): Region aa44abbb59514f8b493e3d258a51d1fb, had cached 0 bytes from a total of 5149 2024-12-07T00:26:42,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742373_1549 (size=8054) 2024-12-07T00:26:42,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742373_1549 (size=8054) 2024-12-07T00:26:42,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742373_1549 (size=8054) 2024-12-07T00:26:42,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742374_1550 (size=5566) 2024-12-07T00:26:42,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742374_1550 (size=5566) 2024-12-07T00:26:42,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742374_1550 (size=5566) 2024-12-07T00:26:42,675 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T00:26:42,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742375_1551 (size=17455) 2024-12-07T00:26:42,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742375_1551 (size=17455) 2024-12-07T00:26:42,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742375_1551 (size=17455) 2024-12-07T00:26:42,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742376_1552 (size=476) 2024-12-07T00:26:42,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742376_1552 (size=476) 2024-12-07T00:26:42,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742376_1552 (size=476) 2024-12-07T00:26:42,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742377_1553 (size=17455) 2024-12-07T00:26:42,880 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742377_1553 (size=17455) 2024-12-07T00:26:42,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742377_1553 (size=17455) 2024-12-07T00:26:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742378_1554 (size=350751) 2024-12-07T00:26:42,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742378_1554 (size=350751) 2024-12-07T00:26:42,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742378_1554 (size=350751) 2024-12-07T00:26:42,960 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1733530972728_0010_000001 (auth:SIMPLE) from 127.0.0.1:43994 2024-12-07T00:26:44,546 INFO [Time-limited test {}] snapshot.ExportSnapshot(1207): Finalize the Snapshot Export 2024-12-07T00:26:44,546 INFO [Time-limited test {}] snapshot.ExportSnapshot(1218): Verify the exported snapshot's expiration status and integrity. 
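[editor's note] The entries above trace ExportSnapshot copying the snapshot manifest and hfiles for 'snaptb0-testExportFileSystemStateWithSkipTmp' through a MapReduce job and then finalizing and verifying the export. As a rough, hedged illustration of how such an export is typically driven outside this test harness, the sketch below invokes the ExportSnapshot tool via ToolRunner; the destination path is a placeholder, and the "snapshot.export.skip.tmp" property is assumed (not confirmed here) to be the switch the skip-tmp variant of this test toggles.

// Hypothetical driver sketch; names/paths are placeholders, not the test's actual values.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotDriver {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Assumption: this property selects the "skip tmp dir" export path exercised by the test.
    conf.setBoolean("snapshot.export.skip.tmp", true);

    // Run the ExportSnapshot MapReduce tool against the source cluster's snapshot.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://namenode:8020/export-test/.hbase-snapshot-target" // placeholder
    });
    System.exit(rc);
  }
}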
2024-12-07T00:26:44,552 INFO [Time-limited test {}] snapshot.ExportSnapshot(1224): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,552 INFO [Time-limited test {}] snapshot.TestExportSnapshot(362): Exported snapshot 2024-12-07T00:26:44,552 INFO [Time-limited test {}] snapshot.TestExportSnapshot(373): Verified filesystem state 2024-12-07T00:26:44,552 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,552 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-07T00:26:44,553 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-07T00:26:44,553 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(448): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_350022270_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,553 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-12-07T00:26:44,553 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(453): hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/export-test/export-1733531187414/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-12-07T00:26:44,557 INFO [Time-limited test {}] client.HBaseAdmin$18(967): Started disable of testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,557 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.HMaster$13(2755): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,559 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-07T00:26:44,560 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531204560"}]},"ts":"1733531204560"} 2024-12-07T00:26:44,561 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(1655): 
Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-12-07T00:26:44,563 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-12-07T00:26:44,563 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-12-07T00:26:44,564 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f4fb4bb36db4edca7b6a0825c61f95de, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2be57a594b760c2bdfd27394b33fb2a7, UNASSIGN}] 2024-12-07T00:26:44,565 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f4fb4bb36db4edca7b6a0825c61f95de, UNASSIGN 2024-12-07T00:26:44,565 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(786): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2be57a594b760c2bdfd27394b33fb2a7, UNASSIGN 2024-12-07T00:26:44,566 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=2be57a594b760c2bdfd27394b33fb2a7, regionState=CLOSING, regionLocation=6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:44,566 INFO [PEWorker-4 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=f4fb4bb36db4edca7b6a0825c61f95de, regionState=CLOSING, regionLocation=6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:44,567 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:26:44,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=213, ppid=212, state=RUNNABLE; CloseRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7, server=6e6711bf2d40,43281,1733530964846}] 2024-12-07T00:26:44,568 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(338): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-12-07T00:26:44,568 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1819): Initialized subprocedures=[{pid=214, ppid=211, state=RUNNABLE; CloseRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de, server=6e6711bf2d40,33095,1733530964767}] 2024-12-07T00:26:44,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-07T00:26:44,718 DEBUG [RSProcedureDispatcher-pool-2 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,43281,1733530964846 2024-12-07T00:26:44,719 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(124): Close 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:44,719 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 
2024-12-07T00:26:44,719 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1681): Closing 2be57a594b760c2bdfd27394b33fb2a7, disabling compactions & flushes 2024-12-07T00:26:44,719 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:44,719 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:44,719 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. after waiting 0 ms 2024-12-07T00:26:44,719 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:44,719 DEBUG [RSProcedureDispatcher-pool-0 {}] master.ServerManager(801): New admin connection to 6e6711bf2d40,33095,1733530964767 2024-12-07T00:26:44,720 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(124): Close f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:44,720 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(138): Unassign region: split region: false: evictCache: false 2024-12-07T00:26:44,720 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1681): Closing f4fb4bb36db4edca7b6a0825c61f95de, disabling compactions & flushes 2024-12-07T00:26:44,720 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1703): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:44,720 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1724): Waiting without time limit for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 2024-12-07T00:26:44,720 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1791): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. after waiting 0 ms 2024-12-07T00:26:44,720 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1801): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 
2024-12-07T00:26:44,724 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:26:44,724 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:26:44,724 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:26:44,724 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:26:44,724 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7. 2024-12-07T00:26:44,724 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1635): Region close journal for 2be57a594b760c2bdfd27394b33fb2a7: 2024-12-07T00:26:44,724 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1922): Closed testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de. 
2024-12-07T00:26:44,724 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1635): Region close journal for f4fb4bb36db4edca7b6a0825c61f95de: 2024-12-07T00:26:44,725 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(170): Closed 2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:44,726 INFO [PEWorker-5 {}] assignment.RegionStateStore(202): pid=212 updating hbase:meta row=2be57a594b760c2bdfd27394b33fb2a7, regionState=CLOSED 2024-12-07T00:26:44,726 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(170): Closed f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:44,726 INFO [PEWorker-1 {}] assignment.RegionStateStore(202): pid=211 updating hbase:meta row=f4fb4bb36db4edca7b6a0825c61f95de, regionState=CLOSED 2024-12-07T00:26:44,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=213, resume processing ppid=212 2024-12-07T00:26:44,728 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=213, ppid=212, state=SUCCESS; CloseRegionProcedure 2be57a594b760c2bdfd27394b33fb2a7, server=6e6711bf2d40,43281,1733530964846 in 160 msec 2024-12-07T00:26:44,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=214, resume processing ppid=211 2024-12-07T00:26:44,728 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=214, ppid=211, state=SUCCESS; CloseRegionProcedure f4fb4bb36db4edca7b6a0825c61f95de, server=6e6711bf2d40,33095,1733530964767 in 159 msec 2024-12-07T00:26:44,729 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1480): Finished pid=212, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=2be57a594b760c2bdfd27394b33fb2a7, UNASSIGN in 164 msec 2024-12-07T00:26:44,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=211, resume processing ppid=210 2024-12-07T00:26:44,729 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1480): Finished pid=211, ppid=210, state=SUCCESS; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=f4fb4bb36db4edca7b6a0825c61f95de, UNASSIGN in 164 msec 2024-12-07T00:26:44,731 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1976): Finished subprocedure pid=210, resume processing ppid=209 2024-12-07T00:26:44,731 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1480): Finished pid=210, ppid=209, state=SUCCESS; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 167 msec 2024-12-07T00:26:44,732 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(2113): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1733531204732"}]},"ts":"1733531204732"} 2024-12-07T00:26:44,732 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(1655): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-12-07T00:26:44,734 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-12-07T00:26:44,735 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1480): Finished pid=209, state=SUCCESS; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 176 msec 2024-12-07T00:26:44,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] 
master.MasterRpcServices(1305): Checking to see if procedure is done pid=209 2024-12-07T00:26:44,861 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 209 completed 2024-12-07T00:26:44,862 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.HMaster$5(2505): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,862 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] procedure2.ProcedureExecutor(1098): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,863 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,863 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] access.PermissionStorage(259): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,864 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,865 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33095 {}] access.PermissionStorage(527): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,866 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:44,866 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:44,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,867 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,868 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,868 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/recovered.edits] 2024-12-07T00:26:44,868 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-07T00:26:44,868 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-07T00:26:44,868 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-07T00:26:44,868 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/cf, FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/recovered.edits] 2024-12-07T00:26:44,868 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-12-07T00:26:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:44,869 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,870 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-12-07T00:26:44,870 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-07T00:26:44,870 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:44,871 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:44,871 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:44,871 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-12-07T00:26:44,872 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/cf/97268340876b4d50ac985c72db94bce2 to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/cf/97268340876b4d50ac985c72db94bce2 2024-12-07T00:26:44,873 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/cf/44ff2ac42a284d01a1099ec9529954cb to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/cf/44ff2ac42a284d01a1099ec9529954cb 2024-12-07T00:26:44,875 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/recovered.edits/9.seqid to hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7/recovered.edits/9.seqid 2024-12-07T00:26:44,875 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/recovered.edits/9.seqid to 
hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de/recovered.edits/9.seqid 2024-12-07T00:26:44,875 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/2be57a594b760c2bdfd27394b33fb2a7 2024-12-07T00:26:44,876 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testtb-testExportFileSystemStateWithSkipTmp/f4fb4bb36db4edca7b6a0825c61f95de 2024-12-07T00:26:44,876 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-12-07T00:26:44,877 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,879 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-12-07T00:26:44,881 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-12-07T00:26:44,882 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,882 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 2024-12-07T00:26:44,882 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531204882"}]},"ts":"9223372036854775807"} 2024-12-07T00:26:44,882 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1733531204882"}]},"ts":"9223372036854775807"} 2024-12-07T00:26:44,883 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1808): Deleted 2 regions from META 2024-12-07T00:26:44,883 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(1809): Deleted regions: [{ENCODED => f4fb4bb36db4edca7b6a0825c61f95de, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1733531186117.f4fb4bb36db4edca7b6a0825c61f95de.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 2be57a594b760c2bdfd27394b33fb2a7, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1733531186117.2be57a594b760c2bdfd27394b33fb2a7.', STARTKEY => '1', ENDKEY => ''}] 2024-12-07T00:26:44,883 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 
2024-12-07T00:26:44,884 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(2113): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1733531204883"}]},"ts":"9223372036854775807"} 2024-12-07T00:26:44,885 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(1678): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-12-07T00:26:44,887 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, locked=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,887 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1480): Finished pid=215, state=SUCCESS; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 25 msec 2024-12-07T00:26:44,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(1305): Checking to see if procedure is done pid=215 2024-12-07T00:26:44,971 INFO [Time-limited test {}] client.HBaseAdmin$TableFuture(3751): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp, procId: 215 completed 2024-12-07T00:26:44,977 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-07T00:26:44,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:44,979 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] master.MasterRpcServices(764): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" 2024-12-07T00:26:44,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025 {}] snapshot.SnapshotManager(380): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:45,000 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=803 (was 797) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-51 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-7476 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) 
java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-53 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:45338 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43303 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-50 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: hconnection-0x26cd44eb-shared-pool-52 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1252240879_1 at /127.0.0.1:45318 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1252240879_1 at /127.0.0.1:56250 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (41150622) connection to localhost/127.0.0.1:43303 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_350022270_22 at /127.0.0.1:56286 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 11611) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=795 (was 792) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=452 (was 489), ProcessCount=17 (was 17), AvailableMemoryMB=2510 (was 2672) 2024-12-07T00:26:45,001 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=803 is superior to 500 2024-12-07T00:26:45,001 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2861): Stopping mini mapreduce cluster... 
2024-12-07T00:26:45,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39ff7dfc{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-07T00:26:45,010 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58fcc341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:26:45,010 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:26:45,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7be3ffc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-07T00:26:45,011 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37e3940c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,STOPPED} 2024-12-07T00:26:45,025 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1733530972728_0010_01_000001 is : 143 2024-12-07T00:26:45,039 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0010/container_1733530972728_0010_01_000001/launch_container.sh] 2024-12-07T00:26:45,039 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0010/container_1733530972728_0010_01_000001/container_tokens] 2024-12-07T00:26:45,039 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-1_1/usercache/jenkins/appcache/application_1733530972728_0010/container_1733530972728_0010_01_000001/sysfs] 2024-12-07T00:26:47,918 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0010/container_1733530972728_0010_01_000002/launch_container.sh] 2024-12-07T00:26:47,918 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0010/container_1733530972728_0010_01_000002/container_tokens] 2024-12-07T00:26:47,918 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/MiniMRCluster_2044347687/yarn-1775966880/MiniMRCluster_2044347687-localDir-nm-0_1/usercache/jenkins/appcache/application_1733530972728_0010/container_1733530972728_0010_01_000002/sysfs] 2024-12-07T00:26:50,226 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:26:54,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-12-07T00:26:59,879 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:27:02,023 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5cc9ed7e{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-12-07T00:27:02,024 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@66d0c4b3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:27:02,024 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:27:02,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@501a7a4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-07T00:27:02,024 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6f349f2d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,STOPPED} 2024-12-07T00:27:12,675 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-12-07T00:27:19,031 ERROR [Thread[Thread-410,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-07T00:27:19,031 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@108a0d16{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-12-07T00:27:19,032 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@28028d61{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:27:19,032 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:27:19,032 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@66ade828{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-07T00:27:19,032 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7ff40a8b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,STOPPED} 2024-12-07T00:27:19,036 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 2024-12-07T00:27:19,042 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-12-07T00:27:19,042 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-12-07T00:27:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741830_1006 (size=947258) 2024-12-07T00:27:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741830_1006 (size=947258) 2024-12-07T00:27:19,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741830_1006 (size=947258) 2024-12-07T00:27:19,047 ERROR [Thread[Thread-434,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-07T00:27:19,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6dd43818{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-12-07T00:27:19,052 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3d55d21f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-12-07T00:27:19,052 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-12-07T00:27:19,053 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@588bf098{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-12-07T00:27:19,053 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@245d5c7b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,STOPPED} 2024-12-07T00:27:19,054 ERROR [Thread[Thread-391,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-12-07T00:27:19,054 INFO [Time-limited test {}] hbase.HBaseTestingUtility(2864): Mini mapreduce cluster stopped 2024-12-07T00:27:19,054 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1340): Shutting down minicluster 2024-12-07T00:27:19,054 INFO [Time-limited test {}] client.ConnectionImplementation(2127): Closing master protocol: MasterService 2024-12-07T00:27:19,055 DEBUG [Time-limited test {}] zookeeper.ReadOnlyZKClient(407): Close zookeeper connection 0x2028d148 to 127.0.0.1:60354 2024-12-07T00:27:19,055 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,055 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-12-07T00:27:19,055 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=300754069, stopped=false 2024-12-07T00:27:19,055 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,055 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-07T00:27:19,055 INFO [Time-limited test {}] master.ServerManager(987): Cluster shutdown requested of master=6e6711bf2d40,35025,1733530963762 2024-12-07T00:27:19,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:27:19,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:27:19,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:27:19,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:27:19,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:27:19,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:27:19,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-12-07T00:27:19,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:27:19,058 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:27:19,058 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:27:19,058 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:27:19,058 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-12-07T00:27:19,062 INFO [Time-limited test {}] procedure2.ProcedureExecutor(700): Stopping 2024-12-07T00:27:19,062 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,062 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6e6711bf2d40,36059,1733530964705' ***** 2024-12-07T00:27:19,062 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,062 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-07T00:27:19,062 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6e6711bf2d40,33095,1733530964767' ***** 2024-12-07T00:27:19,062 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,062 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-07T00:27:19,063 INFO [Time-limited test {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6e6711bf2d40,43281,1733530964846' ***** 2024-12-07T00:27:19,063 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:27:19,063 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,063 INFO [Time-limited test {}] regionserver.HRegionServer(2575): STOPPED: Shutdown requested 2024-12-07T00:27:19,063 INFO [RS:0;6e6711bf2d40:36059 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T00:27:19,063 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:27:19,063 INFO [RS:0;6e6711bf2d40:36059 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-12-07T00:27:19,063 INFO [RS:2;6e6711bf2d40:43281 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T00:27:19,063 INFO [RS:2;6e6711bf2d40:43281 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T00:27:19,063 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(3579): Received CLOSE for c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:27:19,063 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(3579): Received CLOSE for aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:27:19,063 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HeapMemoryManager(220): Stopping 2024-12-07T00:27:19,063 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-07T00:27:19,063 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-07T00:27:19,063 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1224): stopping server 6e6711bf2d40,43281,1733530964846 2024-12-07T00:27:19,064 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1224): stopping server 6e6711bf2d40,36059,1733530964705 2024-12-07T00:27:19,064 INFO [RS:1;6e6711bf2d40:33095 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-12-07T00:27:19,064 DEBUG [RS:2;6e6711bf2d40:43281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,064 DEBUG [RS:0;6e6711bf2d40:36059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,064 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T00:27:19,064 INFO [RS:1;6e6711bf2d40:33095 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-12-07T00:27:19,064 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T00:27:19,064 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T00:27:19,064 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(3579): Received CLOSE for 1588230740 2024-12-07T00:27:19,064 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(3579): Received CLOSE for adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing c613c7c0d4bf4a8124f2c33a8aabebf4, disabling compactions & flushes 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing aa44abbb59514f8b493e3d258a51d1fb, disabling compactions & flushes 2024-12-07T00:27:19,064 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:27:19,064 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(373): MemStoreFlusher.0 exiting 2024-12-07T00:27:19,064 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 
2024-12-07T00:27:19,064 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(3579): Received CLOSE for 37320884d80707cf3a5cc8fc74ed6256 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:27:19,064 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing adc79d1e913b17fcc146c0c4daf0e7f9, disabling compactions & flushes 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. after waiting 0 ms 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. after waiting 0 ms 2024-12-07T00:27:19,064 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1224): stopping server 6e6711bf2d40,33095,1733530964767 2024-12-07T00:27:19,064 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1603): Online Regions={c613c7c0d4bf4a8124f2c33a8aabebf4=testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4., 1588230740=hbase:meta,,1.1588230740} 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:27:19,064 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. after waiting 0 ms 2024-12-07T00:27:19,064 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 
2024-12-07T00:27:19,064 DEBUG [RS:1;6e6711bf2d40:33095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,064 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing adc79d1e913b17fcc146c0c4daf0e7f9 1/1 column families, dataSize=78 B heapSize=488 B 2024-12-07T00:27:19,064 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1599): Waiting on 2 regions to close 2024-12-07T00:27:19,064 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1603): Online Regions={adc79d1e913b17fcc146c0c4daf0e7f9=hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9., 37320884d80707cf3a5cc8fc74ed6256=hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256.} 2024-12-07T00:27:19,065 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1599): Waiting on 1 regions to close 2024-12-07T00:27:19,065 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1603): Online Regions={aa44abbb59514f8b493e3d258a51d1fb=testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb.} 2024-12-07T00:27:19,067 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1681): Closing 1588230740, disabling compactions & flushes 2024-12-07T00:27:19,067 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1703): Closing region hbase:meta,,1.1588230740 2024-12-07T00:27:19,067 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:meta,,1.1588230740 2024-12-07T00:27:19,067 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1791): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-12-07T00:27:19,067 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1801): Updates disabled for region hbase:meta,,1.1588230740 2024-12-07T00:27:19,067 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2837): Flushing 1588230740 3/3 column families, dataSize=68.66 KB heapSize=109 KB 2024-12-07T00:27:19,068 DEBUG [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1629): Waiting on aa44abbb59514f8b493e3d258a51d1fb 2024-12-07T00:27:19,068 DEBUG [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1629): Waiting on 37320884d80707cf3a5cc8fc74ed6256, adc79d1e913b17fcc146c0c4daf0e7f9 2024-12-07T00:27:19,069 DEBUG [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1629): Waiting on 1588230740, c613c7c0d4bf4a8124f2c33a8aabebf4 2024-12-07T00:27:19,069 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/c613c7c0d4bf4a8124f2c33a8aabebf4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T00:27:19,069 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,069 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 
2024-12-07T00:27:19,069 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for c613c7c0d4bf4a8124f2c33a8aabebf4: 2024-12-07T00:27:19,069 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4. 2024-12-07T00:27:19,072 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/default/testExportExpiredSnapshot/aa44abbb59514f8b493e3d258a51d1fb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-12-07T00:27:19,073 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,073 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:27:19,073 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for aa44abbb59514f8b493e3d258a51d1fb: 2024-12-07T00:27:19,073 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1733531110233.aa44abbb59514f8b493e3d258a51d1fb. 2024-12-07T00:27:19,086 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9/.tmp/info/ede2a29313ea463aae529403090ec040 is 45, key is default/info:d/1733530968089/Put/seqid=0 2024-12-07T00:27:19,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742379_1555 (size=5037) 2024-12-07T00:27:19,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742379_1555 (size=5037) 2024-12-07T00:27:19,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742379_1555 (size=5037) 2024-12-07T00:27:19,092 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=78 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9/.tmp/info/ede2a29313ea463aae529403090ec040 2024-12-07T00:27:19,096 INFO [regionserver/6e6711bf2d40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:27:19,097 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/info/36692fca6a674391adf0a1d57735a134 is 173, key is testExportExpiredSnapshot,1,1733531110233.c613c7c0d4bf4a8124f2c33a8aabebf4./info:regioninfo/1733531110657/Put/seqid=0 2024-12-07T00:27:19,098 INFO 
[regionserver/6e6711bf2d40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:27:19,099 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9/.tmp/info/ede2a29313ea463aae529403090ec040 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9/info/ede2a29313ea463aae529403090ec040 2024-12-07T00:27:19,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742380_1556 (size=15630) 2024-12-07T00:27:19,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742380_1556 (size=15630) 2024-12-07T00:27:19,102 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742380_1556 (size=15630) 2024-12-07T00:27:19,103 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=60.26 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/info/36692fca6a674391adf0a1d57735a134 2024-12-07T00:27:19,116 INFO [regionserver/6e6711bf2d40:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:27:19,119 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9/info/ede2a29313ea463aae529403090ec040, entries=2, sequenceid=6, filesize=4.9 K 2024-12-07T00:27:19,120 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~78 B/78, heapSize ~472 B/472, currentSize=0 B/0 for adc79d1e913b17fcc146c0c4daf0e7f9 in 56ms, sequenceid=6, compaction requested=false 2024-12-07T00:27:19,123 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/namespace/adc79d1e913b17fcc146c0c4daf0e7f9/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-12-07T00:27:19,124 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,124 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 2024-12-07T00:27:19,124 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for adc79d1e913b17fcc146c0c4daf0e7f9: 2024-12-07T00:27:19,124 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:namespace,,1733530967428.adc79d1e913b17fcc146c0c4daf0e7f9. 
2024-12-07T00:27:19,124 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1681): Closing 37320884d80707cf3a5cc8fc74ed6256, disabling compactions & flushes 2024-12-07T00:27:19,124 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1703): Closing region hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:27:19,124 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1724): Waiting without time limit for close lock on hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:27:19,124 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1791): Acquired close lock on hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. after waiting 0 ms 2024-12-07T00:27:19,124 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1801): Updates disabled for region hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:27:19,124 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2837): Flushing 37320884d80707cf3a5cc8fc74ed6256 1/1 column families, dataSize=1.38 KB heapSize=3.33 KB 2024-12-07T00:27:19,138 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/rep_barrier/8a2ec2b7161743c4b3b837657bb97b06 is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25./rep_barrier:/1733531107553/DeleteFamily/seqid=0 2024-12-07T00:27:19,139 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256/.tmp/l/38e7b90dfb644c17a21729fd7a6c8583 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1733531107531/DeleteFamily/seqid=0 2024-12-07T00:27:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742381_1557 (size=8007) 2024-12-07T00:27:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742381_1557 (size=8007) 2024-12-07T00:27:19,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742381_1557 (size=8007) 2024-12-07T00:27:19,144 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.34 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/rep_barrier/8a2ec2b7161743c4b3b837657bb97b06 2024-12-07T00:27:19,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742382_1558 (size=5695) 2024-12-07T00:27:19,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742382_1558 (size=5695) 
2024-12-07T00:27:19,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742382_1558 (size=5695) 2024-12-07T00:27:19,145 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.38 KB at sequenceid=27 (bloomFilter=false), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256/.tmp/l/38e7b90dfb644c17a21729fd7a6c8583 2024-12-07T00:27:19,149 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 38e7b90dfb644c17a21729fd7a6c8583 2024-12-07T00:27:19,150 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256/.tmp/l/38e7b90dfb644c17a21729fd7a6c8583 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256/l/38e7b90dfb644c17a21729fd7a6c8583 2024-12-07T00:27:19,154 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(539): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 38e7b90dfb644c17a21729fd7a6c8583 2024-12-07T00:27:19,154 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256/l/38e7b90dfb644c17a21729fd7a6c8583, entries=12, sequenceid=27, filesize=5.6 K 2024-12-07T00:27:19,155 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3040): Finished flush of dataSize ~1.38 KB/1412, heapSize ~3.31 KB/3392, currentSize=0 B/0 for 37320884d80707cf3a5cc8fc74ed6256 in 31ms, sequenceid=27, compaction requested=false 2024-12-07T00:27:19,158 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/acl/37320884d80707cf3a5cc8fc74ed6256/recovered.edits/30.seqid, newMaxSeqId=30, maxSeqId=1 2024-12-07T00:27:19,159 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,159 INFO [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1922): Closed hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 2024-12-07T00:27:19,159 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1635): Region close journal for 37320884d80707cf3a5cc8fc74ed6256: 2024-12-07T00:27:19,159 DEBUG [RS_CLOSE_REGION-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1733530968266.37320884d80707cf3a5cc8fc74ed6256. 
2024-12-07T00:27:19,166 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/table/564762e79601420cb52fcc6c87f93456 is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1733531087954.45558a416e5cf43cb4193d8fc7875e25./table:/1733531107553/DeleteFamily/seqid=0 2024-12-07T00:27:19,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073742383_1559 (size=8861) 2024-12-07T00:27:19,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073742383_1559 (size=8861) 2024-12-07T00:27:19,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073742383_1559 (size=8861) 2024-12-07T00:27:19,171 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.06 KB at sequenceid=202 (bloomFilter=true), to=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/table/564762e79601420cb52fcc6c87f93456 2024-12-07T00:27:19,176 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/info/36692fca6a674391adf0a1d57735a134 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/info/36692fca6a674391adf0a1d57735a134 2024-12-07T00:27:19,179 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/info/36692fca6a674391adf0a1d57735a134, entries=84, sequenceid=202, filesize=15.3 K 2024-12-07T00:27:19,180 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/rep_barrier/8a2ec2b7161743c4b3b837657bb97b06 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/rep_barrier/8a2ec2b7161743c4b3b837657bb97b06 2024-12-07T00:27:19,184 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/rep_barrier/8a2ec2b7161743c4b3b837657bb97b06, entries=21, sequenceid=202, filesize=7.8 K 2024-12-07T00:27:19,184 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/.tmp/table/564762e79601420cb52fcc6c87f93456 as hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/table/564762e79601420cb52fcc6c87f93456 2024-12-07T00:27:19,188 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1989): Added hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/table/564762e79601420cb52fcc6c87f93456, entries=38, sequenceid=202, filesize=8.7 K 2024-12-07T00:27:19,189 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3040): Finished flush of dataSize ~68.66 KB/70312, heapSize ~108.95 KB/111568, currentSize=0 B/0 for 1588230740 in 122ms, sequenceid=202, compaction requested=false 2024-12-07T00:27:19,192 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(409): Wrote file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/data/hbase/meta/1588230740/recovered.edits/205.seqid, newMaxSeqId=205, maxSeqId=1 2024-12-07T00:27:19,193 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:19,193 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T00:27:19,193 INFO [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1922): Closed hbase:meta,,1.1588230740 2024-12-07T00:27:19,193 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1635): Region close journal for 1588230740: 2024-12-07T00:27:19,193 DEBUG [RS_CLOSE_META-regionserver/6e6711bf2d40:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-12-07T00:27:19,269 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1250): stopping server 6e6711bf2d40,43281,1733530964846; all regions closed. 2024-12-07T00:27:19,269 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1250): stopping server 6e6711bf2d40,36059,1733530964705; all regions closed. 2024-12-07T00:27:19,269 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1250): stopping server 6e6711bf2d40,33095,1733530964767; all regions closed. 
2024-12-07T00:27:19,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741835_1011 (size=12979) 2024-12-07T00:27:19,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741834_1010 (size=14746) 2024-12-07T00:27:19,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741835_1011 (size=12979) 2024-12-07T00:27:19,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741834_1010 (size=14746) 2024-12-07T00:27:19,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741834_1010 (size=14746) 2024-12-07T00:27:19,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741836_1012 (size=80694) 2024-12-07T00:27:19,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741836_1012 (size=80694) 2024-12-07T00:27:19,277 DEBUG [RS:2;6e6711bf2d40:43281 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/oldWALs 2024-12-07T00:27:19,277 DEBUG [RS:0;6e6711bf2d40:36059 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/oldWALs 2024-12-07T00:27:19,277 INFO [RS:0;6e6711bf2d40:36059 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6e6711bf2d40%2C36059%2C1733530964705:(num 1733530966775) 2024-12-07T00:27:19,277 INFO [RS:2;6e6711bf2d40:43281 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6e6711bf2d40%2C43281%2C1733530964846.meta:.meta(num 1733530967173) 2024-12-07T00:27:19,277 DEBUG [RS:0;6e6711bf2d40:36059 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,277 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:27:19,277 DEBUG [RS:1;6e6711bf2d40:33095 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/oldWALs 2024-12-07T00:27:19,277 INFO [RS:1;6e6711bf2d40:33095 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6e6711bf2d40%2C33095%2C1733530964767:(num 1733530966775) 2024-12-07T00:27:19,277 DEBUG [RS:1;6e6711bf2d40:33095 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,277 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:27:19,278 INFO [RS:0;6e6711bf2d40:36059 {}] hbase.ChoreService(370): Chore service for: regionserver/6e6711bf2d40:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T00:27:19,278 INFO [RS:1;6e6711bf2d40:33095 {}] hbase.ChoreService(370): Chore service for: regionserver/6e6711bf2d40:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T00:27:19,278 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-12-07T00:27:19,278 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-12-07T00:27:19,278 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T00:27:19,278 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-12-07T00:27:19,278 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T00:27:19,278 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-12-07T00:27:19,278 INFO [regionserver/6e6711bf2d40:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-07T00:27:19,278 INFO [regionserver/6e6711bf2d40:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 2024-12-07T00:27:19,279 INFO [RS:1;6e6711bf2d40:33095 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:33095 2024-12-07T00:27:19,279 INFO [RS:0;6e6711bf2d40:36059 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:36059 2024-12-07T00:27:19,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45899 is added to blk_1073741833_1009 (size=10549) 2024-12-07T00:27:19,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33501 is added to blk_1073741833_1009 (size=10549) 2024-12-07T00:27:19,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6e6711bf2d40,33095,1733530964767 2024-12-07T00:27:19,287 DEBUG [RS:2;6e6711bf2d40:43281 {}] wal.AbstractFSWAL(1071): Moved 1 WAL file(s) to /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/oldWALs 2024-12-07T00:27:19,287 INFO [RS:2;6e6711bf2d40:43281 {}] wal.AbstractFSWAL(1074): Closed WAL: AsyncFSWAL 6e6711bf2d40%2C43281%2C1733530964846:(num 1733530966776) 2024-12-07T00:27:19,287 DEBUG [RS:2;6e6711bf2d40:43281 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,287 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.LeaseManager(133): Closed leases 2024-12-07T00:27:19,287 INFO [RS:2;6e6711bf2d40:43281 {}] hbase.ChoreService(370): Chore service for: regionserver/6e6711bf2d40:0 had [ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-12-07T00:27:19,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6e6711bf2d40,36059,1733530964705 2024-12-07T00:27:19,288 INFO [RS:2;6e6711bf2d40:43281 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:43281 2024-12-07T00:27:19,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T00:27:19,288 INFO [regionserver/6e6711bf2d40:0.logRoller {}] wal.AbstractWALRoller(243): LogRoller exiting. 
2024-12-07T00:27:19,289 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6e6711bf2d40,36059,1733530964705] 2024-12-07T00:27:19,289 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6e6711bf2d40,36059,1733530964705; numProcessing=1 2024-12-07T00:27:19,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/6e6711bf2d40,43281,1733530964846 2024-12-07T00:27:19,290 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-12-07T00:27:19,291 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6e6711bf2d40,36059,1733530964705 already deleted, retry=false 2024-12-07T00:27:19,291 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6e6711bf2d40,36059,1733530964705 expired; onlineServers=2 2024-12-07T00:27:19,291 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6e6711bf2d40,33095,1733530964767] 2024-12-07T00:27:19,291 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6e6711bf2d40,33095,1733530964767; numProcessing=2 2024-12-07T00:27:19,293 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6e6711bf2d40,33095,1733530964767 already deleted, retry=false 2024-12-07T00:27:19,293 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6e6711bf2d40,33095,1733530964767 expired; onlineServers=1 2024-12-07T00:27:19,293 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [6e6711bf2d40,43281,1733530964846] 2024-12-07T00:27:19,293 DEBUG [RegionServerTracker-0 {}] master.DeadServer(103): Processing 6e6711bf2d40,43281,1733530964846; numProcessing=3 2024-12-07T00:27:19,295 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/draining/6e6711bf2d40,43281,1733530964846 already deleted, retry=false 2024-12-07T00:27:19,295 INFO [RegionServerTracker-0 {}] master.ServerManager(652): Cluster shutdown set; 6e6711bf2d40,43281,1733530964846 expired; onlineServers=0 2024-12-07T00:27:19,295 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2561): ***** STOPPING region server '6e6711bf2d40,35025,1733530963762' ***** 2024-12-07T00:27:19,295 INFO [RegionServerTracker-0 {}] regionserver.HRegionServer(2575): STOPPED: Cluster shutdown set; onlineServer=0 2024-12-07T00:27:19,295 DEBUG [M:0;6e6711bf2d40:35025 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2e3461f0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=6e6711bf2d40/172.17.0.2:0 2024-12-07T00:27:19,296 INFO [M:0;6e6711bf2d40:35025 {}] regionserver.HRegionServer(1224): stopping server 6e6711bf2d40,35025,1733530963762 2024-12-07T00:27:19,296 INFO [M:0;6e6711bf2d40:35025 {}] regionserver.HRegionServer(1250): stopping server 6e6711bf2d40,35025,1733530963762; all regions closed. 
2024-12-07T00:27:19,296 DEBUG [M:0;6e6711bf2d40:35025 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-12-07T00:27:19,296 DEBUG [M:0;6e6711bf2d40:35025 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-12-07T00:27:19,296 DEBUG [M:0;6e6711bf2d40:35025 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-12-07T00:27:19,296 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-12-07T00:27:19,296 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster-HFileCleaner.small.0-1733530966298 {}] cleaner.HFileCleaner(306): Exit Thread[master/6e6711bf2d40:0:becomeActiveMaster-HFileCleaner.small.0-1733530966298,5,FailOnTimeoutGroup] 2024-12-07T00:27:19,296 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster-HFileCleaner.large.0-1733530966283 {}] cleaner.HFileCleaner(306): Exit Thread[master/6e6711bf2d40:0:becomeActiveMaster-HFileCleaner.large.0-1733530966283,5,FailOnTimeoutGroup] 2024-12-07T00:27:19,296 INFO [M:0;6e6711bf2d40:35025 {}] hbase.ChoreService(370): Chore service for: master/6e6711bf2d40:0 had [] on shutdown 2024-12-07T00:27:19,296 DEBUG [M:0;6e6711bf2d40:35025 {}] master.HMaster(1733): Stopping service threads 2024-12-07T00:27:19,296 INFO [M:0;6e6711bf2d40:35025 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-12-07T00:27:19,297 INFO [M:0;6e6711bf2d40:35025 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-12-07T00:27:19,297 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-12-07T00:27:19,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-12-07T00:27:19,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-12-07T00:27:19,298 DEBUG [M:0;6e6711bf2d40:35025 {}] zookeeper.ZKUtil(347): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-12-07T00:27:19,298 WARN [M:0;6e6711bf2d40:35025 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-12-07T00:27:19,298 INFO [M:0;6e6711bf2d40:35025 {}] assignment.AssignmentManager(391): Stopping assignment manager 2024-12-07T00:27:19,298 INFO [M:0;6e6711bf2d40:35025 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-12-07T00:27:19,298 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-12-07T00:27:19,298 DEBUG [M:0;6e6711bf2d40:35025 {}] regionserver.HRegion(1681): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-12-07T00:27:19,320 INFO [M:0;6e6711bf2d40:35025 {}] regionserver.HRegion(1703): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-12-07T00:27:19,320 DEBUG [M:0;6e6711bf2d40:35025 {}] regionserver.HRegion(1724): Waiting without time limit for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:27:19,320 DEBUG [M:0;6e6711bf2d40:35025 {}] regionserver.HRegion(1791): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-12-07T00:27:19,320 DEBUG [M:0;6e6711bf2d40:35025 {}] regionserver.HRegion(1801): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-12-07T00:27:19,320 INFO [M:0;6e6711bf2d40:35025 {}] regionserver.HRegion(2837): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=805.79 KB heapSize=967.19 KB 2024-12-07T00:27:19,321 ERROR [AsyncFSWAL-0-hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData-prefix:6e6711bf2d40,35025,1733530963762 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData-prefix:6e6711bf2d40,35025,1733530963762,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:419) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:132) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:830) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:128) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1148) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.appendAndSync(AsyncFSWAL.java:500) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.consume(AsyncFSWAL.java:603) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:27:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:27:19,387 INFO [RS:0;6e6711bf2d40:36059 {}] regionserver.HRegionServer(1307): Exiting; stopping=6e6711bf2d40,36059,1733530964705; zookeeper connection closed. 2024-12-07T00:27:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36059-0x10069db04320001, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:27:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:27:19,387 INFO [RS:1;6e6711bf2d40:33095 {}] regionserver.HRegionServer(1307): Exiting; stopping=6e6711bf2d40,33095,1733530964767; zookeeper connection closed. 
2024-12-07T00:27:19,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33095-0x10069db04320002, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:27:19,387 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@17aacbae {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@17aacbae 2024-12-07T00:27:19,387 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@535c201f {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@535c201f 2024-12-07T00:27:19,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:27:19,392 INFO [RS:2;6e6711bf2d40:43281 {}] regionserver.HRegionServer(1307): Exiting; stopping=6e6711bf2d40,43281,1733530964846; zookeeper connection closed. 2024-12-07T00:27:19,392 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43281-0x10069db04320003, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-12-07T00:27:19,393 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@159479b9 {}] hbase.MiniHBaseCluster$SingleFileSystemShutdownThread(216): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@159479b9 2024-12-07T00:27:19,393 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-12-07T00:27:21,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741833_1009 (size=10549) 2024-12-07T00:27:21,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741835_1011 (size=12979) 2024-12-07T00:27:21,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33385 is added to blk_1073741836_1012 (size=80694) 2024-12-07T00:27:24,584 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_namespace 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: 
Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-12-07T00:27:24,601 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-12-07T00:27:30,102 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:27:42,675 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:27:44,960 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=22, reuseRatio=68.75% 2024-12-07T00:27:44,961 DEBUG [master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-12-07T00:27:52,732 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-12-07T00:28:12,675 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6e6711bf2d40:35025 227 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@67d3a904 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 19 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 19 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbc716d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 3428 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 35 Waiting on java.util.concurrent.CountDownLatch$Sync@2a4052d1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12695 Waited count: 13221 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2f71b4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@573e9f5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 680 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42-acceptor-0@3822fa4e-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:38533}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 26 Waited count: 3071 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16b9ed8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46157): State: TIMED_WAITING Blocked count: 1 Waited 
count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@34419f42): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 113 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 114 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 33462 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1483 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a1b0e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46157): State: TIMED_WAITING Blocked count: 68 Waited count: 2083 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46157): State: TIMED_WAITING Blocked count: 64 Waited count: 2089 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46157): State: TIMED_WAITING Blocked count: 58 Waited count: 2084 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46157): State: TIMED_WAITING Blocked count: 61 Waited count: 2098 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46157): State: TIMED_WAITING Blocked count: 74 Waited count: 2078 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@46ba5920): State: TIMED_WAITING Blocked count: 0 Waited count: 170 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@69d666f6): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@1bb7b22a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@5821d354): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1346053290)): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1388000094-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388000094-87-acceptor-0@6c3ff76a-ServerConnector@1afb4ca2{HTTP/1.1, (http/1.1)}{localhost:35177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388000094-88): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388000094-89): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-787ae7fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74e5c11c): State: TIMED_WAITING Blocked count: 0 Waited count: 677 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 43265): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 273 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2424f917 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1251 Waited count: 1367 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c31689b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 340 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 341 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 341 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 340 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (41150622) connection to localhost/127.0.0.1:46157 from jenkins): State: TIMED_WAITING Blocked count: 1293 Waited count: 1294 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp434427209-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp434427209-122-acceptor-0@2f5c6696-ServerConnector@316f1edc{HTTP/1.1, (http/1.1)}{localhost:32849}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 0 Waited count: 1859 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp434427209-123): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp434427209-124): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-624533dd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@28f6570d): State: TIMED_WAITING Blocked count: 0 Waited count: 676 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36787): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 271 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39a222f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1261 Waited count: 1362 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51fa0b03): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 340 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 339 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp179074441-158): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp179074441-159-acceptor-0@5cff0b60-ServerConnector@1eafff66{HTTP/1.1, (http/1.1)}{localhost:42957}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp179074441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (qtp179074441-161): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (Session-HouseKeeper-146a1a5a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46273d40): State: TIMED_WAITING Blocked count: 0 Waited count: 675 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (java.util.concurrent.ThreadPoolExecutor$Worker@10d8425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 198 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server idle connection scanner for port 41739): State: TIMED_WAITING Blocked count: 1 Waited count: 35 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 68 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@a61d6bb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 262 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c0e12c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1237 Waited count: 1356 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@63d6daf7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 196 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 41739): State: TIMED_WAITING 
Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 341 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 338 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@1e30448a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60354): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 169 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 25 Waited count: 723 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26603028 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:60354):): State: WAITING Blocked count: 2 Waited count: 820 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5897328f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 852 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f41841f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3261dabd Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 307 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:60354)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10219227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fe33ca3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 130 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 129 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ebbdd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 276 Waited count: 1038 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c0e22bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 27 Waited count: 117 Waiting on java.util.concurrent.Semaphore$NonfairSync@502221b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025): State: WAITING Blocked count: 135 Waited count: 5358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76020933 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@157ff37b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@54117fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@323cafe5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@537c4773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6e6711bf2d40:35025): State: TIMED_WAITING Blocked count: 7 Waited count: 2746 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$949/0x00007f2714f228a0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@447f23e): State: TIMED_WAITING Blocked count: 0 Waited count: 112 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3336 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 64 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 55 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 34 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 33298 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a376c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ccdaebf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 
(regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fb0d0e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dd424a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 490 (LeaseRenewer:jenkins.hfs.2@localhost:46157): State: TIMED_WAITING Blocked count: 9 Waited count: 347 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 501 (LeaseRenewer:jenkins.hfs.0@localhost:46157): State: TIMED_WAITING Blocked count: 9 Waited count: 347 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 504 (LeaseRenewer:jenkins.hfs.1@localhost:46157): State: TIMED_WAITING Blocked count: 9 Waited count: 346 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 517 (region-location-0): State: WAITING Blocked count: 7 Waited count: 13 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 33084 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 573 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 636 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 686 (region-location-1): State: WAITING Blocked count: 5 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 687 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 689 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 394 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1111 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@284c38f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1171 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1278 (ForkJoinPool.commonPool-worker-3): State: WAITING Blocked count: 0 Waited count: 559 Waiting on java.util.concurrent.ForkJoinPool@4def593d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1527 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@61a161cc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1553 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 632 Waiting on java.util.concurrent.ForkJoinPool@4def593d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3078 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5072 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited 
count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8912 (AsyncFSWAL-1-hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData-prefix:6e6711bf2d40,35025,1733530963762): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5630fa74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8915 
(java.util.concurrent.ThreadPoolExecutor$Worker@4b302e23[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8917 (java.util.concurrent.ThreadPoolExecutor$Worker@2062a4e5[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8918 (java.util.concurrent.ThreadPoolExecutor$Worker@6dca9104[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-07T00:28:42,675 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:29:12,676 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
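Note on the two DEBUG lines above: the FsDatasetAsyncDiskServiceFixer in HBaseTestingUtility evidently tries to reach a private Hadoop field named "threadGroup" via reflection, and on Hadoop releases newer than 3.2.3/3.3.4 that field is no longer declared, so the lookup fails with NoSuchFieldException and the fixer just logs the miss (see HBASE-27595). The snippet below is only a minimal, self-contained illustration of that failure mode; the class and field here are stand-ins, not the actual Hadoop/HBase types.

import java.lang.reflect.Field;

// Minimal illustration (not the actual HBase/Hadoop code): a reflective lookup
// of a field that a newer library version no longer declares surfaces as the
// NoSuchFieldException reported in the DEBUG lines above.
public class ThreadGroupFieldLookupSketch {

    // Stand-in for a class whose older versions declared a private
    // "threadGroup" field and whose newer versions do not.
    static class AsyncDiskServiceStandIn {
        private final String name = "demo";
    }

    public static void main(String[] args) {
        try {
            Field f = AsyncDiskServiceStandIn.class.getDeclaredField("threadGroup");
            f.setAccessible(true);
            System.out.println("patched field: " + f.getName());
        } catch (NoSuchFieldException e) {
            // The branch hit on newer Hadoop: nothing to patch, so log and move on.
            System.out.println("NoSuchFieldException: " + e.getMessage());
        }
    }
}

Running this prints "NoSuchFieldException: threadGroup", matching the exception name and field quoted in the DEBUG messages.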
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6e6711bf2d40:35025 220 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@67d3a904 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 20 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbc716d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4028 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 41 Waiting on java.util.concurrent.CountDownLatch$Sync@3d60e358 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12695 Waited count: 13222 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2f71b4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@573e9f5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 800 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42-acceptor-0@3822fa4e-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:38533}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 26 Waited count: 3071 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16b9ed8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46157): State: TIMED_WAITING Blocked count: 1 Waited 
count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@34419f42): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 133 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 134 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 39426 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1483 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a1b0e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46157): State: TIMED_WAITING Blocked count: 68 Waited count: 2144 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46157): State: TIMED_WAITING Blocked count: 64 Waited count: 2150 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46157): State: TIMED_WAITING Blocked count: 58 Waited count: 2145 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46157): State: TIMED_WAITING Blocked count: 61 Waited count: 2159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46157): State: TIMED_WAITING Blocked count: 74 Waited count: 2139 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@46ba5920): State: TIMED_WAITING Blocked count: 0 Waited count: 200 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@69d666f6): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@1bb7b22a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@5821d354): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1346053290)): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1388000094-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388000094-87-acceptor-0@6c3ff76a-ServerConnector@1afb4ca2{HTTP/1.1, (http/1.1)}{localhost:35177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388000094-88): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388000094-89): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-787ae7fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74e5c11c): State: TIMED_WAITING Blocked count: 0 Waited count: 797 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 43265): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 293 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2424f917 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1271 Waited count: 1407 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c31689b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 401 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 401 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (41150622) connection to localhost/127.0.0.1:46157 from jenkins): State: TIMED_WAITING Blocked count: 1353 Waited count: 1354 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp434427209-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp434427209-122-acceptor-0@2f5c6696-ServerConnector@316f1edc{HTTP/1.1, (http/1.1)}{localhost:32849}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 0 Waited count: 1919 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp434427209-123): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp434427209-124): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-624533dd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@28f6570d): State: TIMED_WAITING Blocked count: 0 Waited count: 796 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36787): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 291 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39a222f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1281 Waited count: 1402 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51fa0b03): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 399 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp179074441-158): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp179074441-159-acceptor-0@5cff0b60-ServerConnector@1eafff66{HTTP/1.1, (http/1.1)}{localhost:42957}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp179074441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (qtp179074441-161): State: TIMED_WAITING Blocked count: 0 Waited count: 7 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (Session-HouseKeeper-146a1a5a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46273d40): State: TIMED_WAITING Blocked count: 0 Waited count: 795 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (java.util.concurrent.ThreadPoolExecutor$Worker@10d8425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 198 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server idle connection scanner for port 41739): State: TIMED_WAITING Blocked count: 1 Waited count: 41 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 80 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@a61d6bb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 282 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c0e12c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1257 Waited count: 1396 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@63d6daf7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 196 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 41739): State: TIMED_WAITING 
Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 401 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 398 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@1e30448a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60354): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 199 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 25 Waited count: 728 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26603028 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:60354):): State: WAITING Blocked count: 2 Waited count: 825 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5897328f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 857 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f41841f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3261dabd Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 335 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:60354)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10219227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fe33ca3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 131 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 130 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ebbdd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 276 Waited count: 1038 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c0e22bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 27 Waited count: 117 Waiting on java.util.concurrent.Semaphore$NonfairSync@502221b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025): State: WAITING Blocked count: 135 Waited count: 5358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76020933 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@157ff37b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@54117fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@323cafe5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@537c4773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6e6711bf2d40:35025): State: TIMED_WAITING Blocked count: 7 Waited count: 2746 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$949/0x00007f2714f228a0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@447f23e): State: TIMED_WAITING Blocked count: 0 Waited count: 132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 3936 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 64 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@206bcc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 40 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39300 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a376c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ccdaebf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fb0d0e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dd424a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (region-location-0): State: WAITING Blocked count: 7 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 39086 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 686 (region-location-1): State: WAITING Blocked count: 5 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 687 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 689 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 400 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1111 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@284c38f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1171 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1278 (ForkJoinPool.commonPool-worker-3): State: TIMED_WAITING Blocked count: 0 Waited count: 560 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 1527 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@61a161cc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1553 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 632 Waiting on java.util.concurrent.ForkJoinPool@4def593d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3078 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5072 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8912 (AsyncFSWAL-1-hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData-prefix:6e6711bf2d40,35025,1733530963762): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5630fa74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-07T00:29:42,676 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:30:12,676 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6e6711bf2d40:35025 219 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@67d3a904 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbc716d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 4628 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 47 Waiting on java.util.concurrent.CountDownLatch$Sync@5d8ef103 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12695 Waited count: 13223 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2f71b4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@573e9f5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 920 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42-acceptor-0@3822fa4e-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:38533}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 26 Waited count: 3071 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16b9ed8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46157): State: TIMED_WAITING Blocked count: 1 Waited 
count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@34419f42): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 45389 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1483 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a1b0e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46157): State: TIMED_WAITING Blocked count: 68 Waited count: 2205 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46157): State: TIMED_WAITING Blocked count: 64 Waited count: 2210 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46157): State: TIMED_WAITING Blocked count: 58 Waited count: 2207 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46157): State: TIMED_WAITING Blocked count: 61 Waited count: 2219 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46157): State: TIMED_WAITING Blocked count: 74 Waited count: 2200 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@46ba5920): State: TIMED_WAITING Blocked count: 0 Waited count: 230 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@69d666f6): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@1bb7b22a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@5821d354): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1346053290)): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1388000094-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388000094-87-acceptor-0@6c3ff76a-ServerConnector@1afb4ca2{HTTP/1.1, (http/1.1)}{localhost:35177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388000094-88): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388000094-89): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-787ae7fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74e5c11c): State: TIMED_WAITING Blocked count: 0 Waited count: 917 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 43265): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 313 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2424f917 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1291 Waited count: 1447 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c31689b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (41150622) connection to localhost/127.0.0.1:46157 from jenkins): State: TIMED_WAITING Blocked count: 1413 Waited count: 1414 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp434427209-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp434427209-122-acceptor-0@2f5c6696-ServerConnector@316f1edc{HTTP/1.1, (http/1.1)}{localhost:32849}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 0 Waited count: 1979 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp434427209-123): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp434427209-124): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-624533dd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@28f6570d): State: TIMED_WAITING Blocked count: 0 Waited count: 916 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36787): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 311 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39a222f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1301 Waited count: 1442 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51fa0b03): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 460 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 459 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp179074441-158): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp179074441-159-acceptor-0@5cff0b60-ServerConnector@1eafff66{HTTP/1.1, (http/1.1)}{localhost:42957}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp179074441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (qtp179074441-161): State: TIMED_WAITING Blocked count: 0 Waited count: 8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (Session-HouseKeeper-146a1a5a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46273d40): State: TIMED_WAITING Blocked count: 0 Waited count: 915 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (java.util.concurrent.ThreadPoolExecutor$Worker@10d8425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 198 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server idle connection scanner for port 41739): State: TIMED_WAITING Blocked count: 1 Waited count: 47 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 92 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@a61d6bb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 302 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c0e12c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1277 Waited count: 1436 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@63d6daf7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 196 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 41739): State: TIMED_WAITING 
Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 461 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 458 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@1e30448a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60354): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 229 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 25 Waited count: 732 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26603028 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:60354):): State: WAITING Blocked count: 2 Waited count: 829 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5897328f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 861 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f41841f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3261dabd Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 363 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:60354)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10219227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fe33ca3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 131 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ebbdd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 276 Waited count: 1038 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c0e22bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 27 Waited count: 117 Waiting on java.util.concurrent.Semaphore$NonfairSync@502221b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025): State: WAITING Blocked count: 135 Waited count: 5358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76020933 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@157ff37b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@54117fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@323cafe5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@537c4773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6e6711bf2d40:35025): State: TIMED_WAITING Blocked count: 7 Waited count: 2746 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$949/0x00007f2714f228a0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@447f23e): State: TIMED_WAITING Blocked count: 0 Waited count: 152 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 4536 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 64 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@206bcc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 46 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45301 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a376c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ccdaebf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fb0d0e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dd424a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (region-location-0): State: WAITING Blocked count: 7 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 45088 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 686 (region-location-1): State: WAITING Blocked count: 5 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 687 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 689 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 406 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1111 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@284c38f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1171 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1527 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@61a161cc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1553 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3078 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5072 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8912 (AsyncFSWAL-1-hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData-prefix:6e6711bf2d40,35025,1733530963762): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5630fa74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 15 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-07T00:30:42,676 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:31:12,676 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6e6711bf2d40:35025 218 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@67d3a904 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 22 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbc716d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5228 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 53 Waiting on java.util.concurrent.CountDownLatch$Sync@64efc3b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12695 Waited count: 13224 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2f71b4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@573e9f5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 1040 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42-acceptor-0@3822fa4e-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:38533}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 26 Waited count: 3071 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16b9ed8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46157): State: TIMED_WAITING Blocked count: 1 Waited 
count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@34419f42): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 173 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 51352 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1483 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a1b0e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46157): State: TIMED_WAITING Blocked count: 68 Waited count: 2266 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46157): State: TIMED_WAITING Blocked count: 64 Waited count: 2271 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46157): State: TIMED_WAITING Blocked count: 58 Waited count: 2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46157): State: TIMED_WAITING Blocked count: 61 Waited count: 2281 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46157): State: TIMED_WAITING Blocked count: 74 Waited count: 2261 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@46ba5920): State: TIMED_WAITING Blocked count: 0 Waited count: 260 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@69d666f6): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@1bb7b22a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@5821d354): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1346053290)): State: TIMED_WAITING Blocked count: 0 Waited count: 19 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1388000094-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388000094-87-acceptor-0@6c3ff76a-ServerConnector@1afb4ca2{HTTP/1.1, (http/1.1)}{localhost:35177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388000094-88): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388000094-89): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-787ae7fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
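The per-thread fields in this dump (State, Blocked count, Waited count, the optional "Waiting on" lock, and Stack) are the ones exposed by the standard java.lang.management ThreadMXBean API, which is the mechanism visible in Thread 22's own stack (sun.management.ThreadImpl.getThreadInfo called from org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo). A minimal, self-contained sketch of producing an equivalent dump with that standard API — an illustration only, not HBase's actual ReflectionUtils implementation — looks like this:

    import java.lang.management.ManagementFactory;
    import java.lang.management.ThreadInfo;
    import java.lang.management.ThreadMXBean;

    public class ThreadDumpSketch {
      public static void main(String[] args) {
        ThreadMXBean threads = ManagementFactory.getThreadMXBean();
        // Ask for every live thread with its full stack (Integer.MAX_VALUE frames).
        for (ThreadInfo info : threads.getThreadInfo(threads.getAllThreadIds(), Integer.MAX_VALUE)) {
          if (info == null) {
            continue; // thread exited between getAllThreadIds() and getThreadInfo()
          }
          System.out.printf("Thread %d (%s):%n", info.getThreadId(), info.getThreadName());
          System.out.printf("  State: %s%n", info.getThreadState());
          System.out.printf("  Blocked count: %d%n", info.getBlockedCount());
          System.out.printf("  Waited count: %d%n", info.getWaitedCount());
          if (info.getLockName() != null) {
            System.out.printf("  Waiting on %s%n", info.getLockName());
          }
          System.out.println("  Stack:");
          for (StackTraceElement frame : info.getStackTrace()) {
            System.out.println("    " + frame);
          }
        }
      }
    }

Blocked count and Waited count are ThreadMXBean's cumulative per-thread totals of monitor contentions and waits, which is why long-lived but idle workers in this dump (for example the IPC Server handler threads parked in CallQueueManager.take) show large Waited counts while remaining TIMED_WAITING.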
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74e5c11c): State: TIMED_WAITING Blocked count: 0 Waited count: 1037 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 43265): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 333 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2424f917 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1311 Waited count: 1487 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c31689b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 521 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 521 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 519 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (41150622) connection to localhost/127.0.0.1:46157 from jenkins): State: TIMED_WAITING Blocked count: 1473 Waited count: 1474 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp434427209-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp434427209-122-acceptor-0@2f5c6696-ServerConnector@316f1edc{HTTP/1.1, (http/1.1)}{localhost:32849}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 0 Waited count: 2039 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp434427209-123): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp434427209-124): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-624533dd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@28f6570d): State: TIMED_WAITING Blocked count: 0 Waited count: 1036 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36787): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 331 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39a222f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1321 Waited count: 1482 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51fa0b03): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 520 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 519 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp179074441-158): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp179074441-159-acceptor-0@5cff0b60-ServerConnector@1eafff66{HTTP/1.1, (http/1.1)}{localhost:42957}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp179074441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (qtp179074441-161): State: TIMED_WAITING Blocked count: 0 Waited count: 9 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (Session-HouseKeeper-146a1a5a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46273d40): State: TIMED_WAITING Blocked count: 0 Waited count: 1035 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (java.util.concurrent.ThreadPoolExecutor$Worker@10d8425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 198 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server idle connection scanner for port 41739): State: TIMED_WAITING Blocked count: 1 Waited count: 53 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 104 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@a61d6bb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 322 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c0e12c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1297 Waited count: 1476 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@63d6daf7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 196 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 521 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 518 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@1e30448a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60354): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 259 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 25 Waited count: 736 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26603028 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:60354):): State: WAITING Blocked count: 2 Waited count: 833 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5897328f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 865 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f41841f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3261dabd Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 391 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:60354)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10219227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fe33ca3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 132 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 131 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ebbdd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 276 Waited count: 1038 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c0e22bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 27 Waited count: 117 Waiting on java.util.concurrent.Semaphore$NonfairSync@502221b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025): State: WAITING Blocked count: 135 Waited count: 5358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76020933 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@157ff37b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@54117fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@323cafe5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@537c4773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6e6711bf2d40:35025): State: TIMED_WAITING Blocked count: 7 Waited count: 2746 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$949/0x00007f2714f228a0.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@447f23e): State: TIMED_WAITING Blocked count: 0 Waited count: 172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5135 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 64 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@206bcc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 52 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51303 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 (RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a376c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ccdaebf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fb0d0e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dd424a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (region-location-0): State: WAITING Blocked count: 7 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51089 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 686 (region-location-1): State: WAITING Blocked count: 5 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 687 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 689 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 412 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1111 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@284c38f5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1171 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1527 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@61a161cc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2048 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3078 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5072 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8912 (AsyncFSWAL-1-hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData-prefix:6e6711bf2d40,35025,1733530963762): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5630fa74 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8923 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-12-07T00:31:42,677 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:32:12,677 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer(620): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-12-07T00:32:19,321 DEBUG [M:0;6e6711bf2d40:35025 {}] regionserver.HRegion(1635): Region close journal for 1595e783b53d99cd5eef43b6debb2682: 2024-12-07T00:32:19,321 WARN [M:0;6e6711bf2d40:35025 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:883) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doSync(AsyncFSWAL.java:671) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$1(AbstractFSWAL.java:600) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:600) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:590) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2869) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2811) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2670) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2644) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2635) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1810) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1631) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1586) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1569) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=3722, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:879) ~[classes/:?] ... 20 more 2024-12-07T00:32:19,323 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(163): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:396) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:243) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:201) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:236) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:160) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:32:19,324 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-12-07T00:32:19,324 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-12-07T00:32:19,325 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762/6e6711bf2d40%2C35025%2C1733530963762.1733530965439 2024-12-07T00:32:19,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762/6e6711bf2d40%2C35025%2C1733530963762.1733530965439 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-12-07T00:32:19,325 WARN [Close-WAL-Writer-0 {}] wal.AsyncFSWAL(734): close old writer failed. java.io.InterruptedIOException: Operation cancelled at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:610) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:2.7.0-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:164) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:732) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
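For context on the repeated RecoverLeaseFSUtils attempts in the entries above and below: lease recovery amounts to asking the NameNode to close the abandoned WAL file via DistributedFileSystem.recoverLease and retrying until it reports success. The sketch below is a minimal illustration of that retry loop, not HBase's actual RecoverLeaseFSUtils code; the HDFS URI and file path are placeholders, and an IOException such as the "Filesystem closed" seen in this log would propagate out of the recoverLease call.

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {

  /**
   * Repeatedly asks the NameNode to recover the lease on a file.
   * Returns true once the lease is recovered (file closed), false if attempts run out.
   */
  static boolean recoverLease(DistributedFileSystem dfs, Path file,
      int maxAttempts, long pauseMs) throws IOException, InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // recoverLease() returns true once the NameNode has closed the file;
      // an IOException such as "Filesystem closed" propagates to the caller.
      if (dfs.recoverLease(file)) {
        return true;
      }
      Thread.sleep(pauseMs); // wait before asking the NameNode again
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder URI and path, for illustration only.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    if (fs instanceof DistributedFileSystem) {
      boolean ok = recoverLease((DistributedFileSystem) fs,
          new Path("/user/example/wal-file"), 5, 1000L);
      System.out.println("lease recovered: " + ok);
    }
  }
}
```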
2024-12-07T00:32:19,326 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762/6e6711bf2d40%2C35025%2C1733530963762.1733530965439 2024-12-07T00:32:19,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762/6e6711bf2d40%2C35025%2C1733530963762.1733530965439 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
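The per-thread "State / Blocked count / Waited count / Stack" listings above, and in the dump that follows, are the standard output of the JVM's ThreadMXBean. A minimal sketch of producing such a dump on a 60-second schedule is shown below; it is illustrative only and not the test harness's actual dump code, and the PeriodicThreadDump class name and the plain ScheduledExecutorService are assumptions.

```java
import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class PeriodicThreadDump {

  /** Prints one dump in roughly the format seen in this log: name, state, blocked/waited counts, stack. */
  static void dumpAllThreads() {
    ThreadMXBean mx = ManagementFactory.getThreadMXBean();
    // dumpAllThreads(lockedMonitors=false, lockedSynchronizers=false): stack traces only.
    for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
      System.out.printf("Thread %d (%s): State: %s Blocked count: %d Waited count: %d Stack:%n",
          info.getThreadId(), info.getThreadName(), info.getThreadState(),
          info.getBlockedCount(), info.getWaitedCount());
      for (StackTraceElement frame : info.getStackTrace()) {
        System.out.println("  " + frame);
      }
    }
  }

  public static void main(String[] args) {
    ScheduledExecutorService scheduler = Executors.newSingleThreadScheduledExecutor();
    // Emit a dump every 60 seconds, matching the "Automatic Stack Trace every 60 seconds" cadence.
    scheduler.scheduleAtFixedRate(PeriodicThreadDump::dumpAllThreads, 60, 60, TimeUnit.SECONDS);
  }
}
```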
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;6e6711bf2d40:35025 221 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 8 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 20 Waited count: 14 Waiting on java.lang.ref.ReferenceQueue$Lock@67d3a904 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 15 Waited count: 23 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 29 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bbc716d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5827 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 59 Waiting on java.util.concurrent.CountDownLatch$Sync@1a7c85f6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 12695 Waited count: 13225 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:394) app//org.apache.hadoop.hbase.MiniHBaseCluster.waitUntilShutDown(MiniHBaseCluster.java:921) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniHBaseCluster(HBaseTestingUtility.java:1359) app//org.apache.hadoop.hbase.HBaseTestingUtility.shutdownMiniCluster(HBaseTestingUtility.java:1341) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:121) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 12 Waited count: 13 Waiting on java.lang.ref.ReferenceQueue$Lock@2f71b4c0 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@573e9f5a Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@20ad794b): State: TIMED_WAITING Blocked count: 0 Waited count: 1160 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1056518703-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1056518703-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1056518703-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1056518703-40): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1056518703-41): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1056518703-42-acceptor-0@3822fa4e-ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:38533}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1056518703-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1056518703-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-5c96b88a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 26 Waited count: 3071 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16b9ed8d Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 46157): State: TIMED_WAITING Blocked count: 1 Waited 
count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@34419f42): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 193 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@a039f09): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 194 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57316 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 0 Waited count: 1483 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3a1b0e03 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 46157): State: TIMED_WAITING Blocked count: 68 Waited count: 2327 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 46157): State: TIMED_WAITING Blocked count: 64 Waited count: 2332 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 46157): State: TIMED_WAITING Blocked count: 58 Waited count: 2329 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 46157): State: TIMED_WAITING Blocked count: 61 Waited count: 2341 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 46157): State: TIMED_WAITING Blocked count: 74 Waited count: 2322 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@46ba5920): State: TIMED_WAITING Blocked count: 0 Waited count: 290 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@69d666f6): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@1bb7b22a): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@5821d354): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1346053290)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 85 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 86 (qtp1388000094-86): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp1388000094-87-acceptor-0@6c3ff76a-ServerConnector@1afb4ca2{HTTP/1.1, (http/1.1)}{localhost:35177}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp1388000094-88): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp1388000094-89): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (Session-HouseKeeper-787ae7fc-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 91 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@74e5c11c): State: TIMED_WAITING Blocked count: 0 Waited count: 1157 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 94 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 95 (IPC Server idle connection scanner for port 43265): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 97 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 100 (Command processor): State: WAITING Blocked count: 0 Waited count: 353 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2424f917 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 101 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1331 Waited count: 1527 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 102 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 84 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@5c31689b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 96 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 93 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 103 (IPC Server handler 0 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 104 (IPC Server handler 1 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 2 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 3 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 4 on default port 43265): State: TIMED_WAITING Blocked count: 0 Waited count: 579 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Client (41150622) connection to localhost/127.0.0.1:46157 from jenkins): State: TIMED_WAITING Blocked count: 1533 Waited count: 1534 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 121 (qtp434427209-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 122 (qtp434427209-122-acceptor-0@2f5c6696-ServerConnector@316f1edc{HTTP/1.1, (http/1.1)}{localhost:32849}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (IPC Parameter Sending Thread for localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 0 Waited count: 2099 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp434427209-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp434427209-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-624533dd-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@28f6570d): State: TIMED_WAITING Blocked count: 0 Waited count: 1156 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 36787): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 136 (Command processor): State: WAITING Blocked count: 1 Waited count: 351 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@39a222f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1341 Waited count: 1522 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 117 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@51fa0b03): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 580 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 579 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 144 (IPC Server handler 4 on default port 36787): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 156 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp179074441-158): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$255/0x00007f2714428988.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp179074441-159-acceptor-0@5cff0b60-ServerConnector@1eafff66{HTTP/1.1, (http/1.1)}{localhost:42957}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 160 (qtp179074441-160): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (qtp179074441-161): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (Session-HouseKeeper-146a1a5a-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3)): State: TIMED_WAITING Blocked count: 4 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4)): State: TIMED_WAITING Blocked count: 5 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 166 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 175 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 182 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 187 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 189 
(nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@46273d40): State: TIMED_WAITING Blocked count: 0 Waited count: 1155 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 194 (java.util.concurrent.ThreadPoolExecutor$Worker@10d8425[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 198 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 199 (IPC Server idle connection scanner for port 41739): State: TIMED_WAITING Blocked count: 1 Waited count: 59 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 201 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 116 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 204 (java.util.concurrent.ThreadPoolExecutor$Worker@a61d6bb[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 207 (Command processor): State: WAITING Blocked count: 1 Waited count: 342 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3c0e12c6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 208 (BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157): State: TIMED_WAITING Blocked count: 1317 Waited count: 1516 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 155 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@63d6daf7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 200 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 196 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 210 (IPC Server handler 0 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 211 (IPC Server handler 1 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 2 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 3 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 581 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 4 on default port 41739): State: TIMED_WAITING Blocked count: 0 Waited count: 578 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 217 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited 
count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 224 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6/current/BP-2074794187-172.17.0.2-1733530959153): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 228 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 231 (java.util.concurrent.ThreadPoolExecutor$Worker@1e30448a[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 236 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtility$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtility.java:599) Thread 238 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 2 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 11 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 240 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:60354): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 237 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 241 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 289 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 242 (SyncThread:0): State: WAITING Blocked count: 25 Waited count: 741 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@26603028 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 243 (ProcessThread(sid:0 cport:60354):): State: WAITING Blocked count: 2 Waited count: 838 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5897328f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 244 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 870 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4f41841f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 245 (NIOWorkerThread-1): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 256 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3261dabd Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 419 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (RS-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 17 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (Time-limited test-SendThread(127.0.0.1:60354)): State: RUNNABLE Blocked count: 18 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 260 
(Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 55 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@10219227 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 261 (NIOWorkerThread-2): State: WAITING Blocked count: 4 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 262 (NIOWorkerThread-3): State: WAITING Blocked count: 3 Waited count: 134 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-4): State: WAITING Blocked count: 0 Waited count: 135 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (zk-event-processor-pool-0): State: WAITING Blocked count: 34 Waited count: 85 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3fe33ca3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-6): State: WAITING Blocked count: 2 Waited count: 132 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-7): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-8): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 
(NIOWorkerThread-9): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-12): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-13): State: WAITING Blocked count: 1 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-14): State: WAITING Blocked count: 1 Waited count: 133 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-15): State: WAITING Blocked count: 4 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 132 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@15a75951 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@3ebbdd0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 278 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 276 Waited count: 1038 Waiting on java.util.concurrent.Semaphore$NonfairSync@4c0e22bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 27 Waited count: 117 Waiting on java.util.concurrent.Semaphore$NonfairSync@502221b0 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 280 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35025): State: WAITING Blocked count: 135 Waited count: 5358 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@76020933 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 281 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 282 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@28d93f1e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:71) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 283 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@157ff37b Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 284 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@54117fc5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@323cafe5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 286 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=35025): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@537c4773 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:85) Thread 290 (RS-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (RS-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 334 (RS-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 288 (M:0;6e6711bf2d40:35025): State: TIMED_WAITING Blocked count: 7 Waited count: 2747 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1011) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown(AbstractFSWALProvider.java:184) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:272) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1758) app//org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:1285) app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:603) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 361 (master/6e6711bf2d40:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 363 (org.apache.hadoop.hdfs.PeerCache@447f23e): State: TIMED_WAITING Blocked count: 0 Waited count: 192 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 380 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5735 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:179) Thread 393 (RS-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 64 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 394 (RS-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 92 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 406 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 68 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@206bcc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 417 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 58 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57304 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (RS-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 35 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 427 
(RS-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 10 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5a376c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 472 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 12 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5ccdaebf Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 470 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 14 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5fb0d0e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 474 (regionserver/6e6711bf2d40:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dd424a8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 517 (region-location-0): State: WAITING Blocked count: 7 Waited count: 13 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 537 (RS-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 543 (RS-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 550 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57091 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 557 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 686 (region-location-1): State: WAITING Blocked count: 5 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 687 (region-location-2): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 689 (region-location-3): State: WAITING Blocked count: 2 Waited count: 6 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1006 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 418 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1072 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 6 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1100 (RS-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1111 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 59 Waited count: 94 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@284c38f5 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1170 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1171 (RS-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1527 (Container metrics unregistration): State: WAITING Blocked count: 10 Waited count: 33 Waiting on java.util.TaskQueue@61a161cc Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 2048 
(RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 3078 (region-location-4): State: WAITING Blocked count: 2 Waited count: 5 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@7c66a858 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5072 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5073 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 5074 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 8912 (AsyncFSWAL-1-hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData-prefix:6e6711bf2d40,35025,1733530963762): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5630fa74 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8923 (Timer for 'JobHistoryServer' metrics system):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 27
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 8924 (process reaper):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 4
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8929 (WAL-Shutdown-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doShutdown(AsyncFSWAL.java:793)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:995)
    app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:990)
    java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 8930 (Close-WAL-Writer-0):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166)
    app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735)
    app//org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL$$Lambda$1115/0x00007f2715152f38.run(Unknown Source)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
2024-12-07T00:32:23,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762/6e6711bf2d40%2C35025%2C1733530963762.1733530965439 after 4000ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-12-07T00:32:24,323 ERROR [WAL-Shutdown-0 {}] wal.AsyncFSWAL(794): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds"
2024-12-07T00:32:24,323 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(243): LogRoller exiting.
2024-12-07T00:32:24,323 INFO [M:0;6e6711bf2d40:35025 {}] flush.MasterFlushTableProcedureManager(91): stop: server shutting down.
2024-12-07T00:32:24,323 INFO [M:0;6e6711bf2d40:35025 {}] ipc.NettyRpcServer(351): Stopping server on /172.17.0.2:35025
2024-12-07T00:32:24,325 DEBUG [M:0;6e6711bf2d40:35025 {}] zookeeper.RecoverableZooKeeper(215): Node /hbase/rs/6e6711bf2d40,35025,1733530963762 already deleted, retry=false
2024-12-07T00:32:24,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46157/user/jenkins/test-data/2b5f6ad0-727e-ff4a-aba4-5def0e501b45/MasterData/WALs/6e6711bf2d40,35025,1733530963762/6e6711bf2d40%2C35025%2C1733530963762.1733530965439
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-2.7.0-SNAPSHOT.jar:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.recoverLease(AsyncFSWAL.java:722) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.lambda$closeWriter$5(AsyncFSWAL.java:735) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 12 more
2024-12-07T00:32:24,427 INFO [M:0;6e6711bf2d40:35025 {}] regionserver.HRegionServer(1307): Exiting; stopping=6e6711bf2d40,35025,1733530963762; zookeeper connection closed.
2024-12-07T00:32:24,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T00:32:24,427 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35025-0x10069db04320000, quorum=127.0.0.1:60354, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null
2024-12-07T00:32:24,430 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@666fb670{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T00:32:24,431 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1eafff66{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T00:32:24,431 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T00:32:24,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13ce8b71{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T00:32:24,431 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@65fa26fe{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,STOPPED}
2024-12-07T00:32:24,432 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T00:32:24,433 WARN [BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T00:32:24,433 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T00:32:24,433 WARN [BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2074794187-172.17.0.2-1733530959153 (Datanode Uuid 8b0012f3-4bca-4ac9-98e3-1111e06048df) service to localhost/127.0.0.1:46157
2024-12-07T00:32:24,434 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data5/current/BP-2074794187-172.17.0.2-1733530959153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:32:24,434 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data6/current/BP-2074794187-172.17.0.2-1733530959153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:32:24,435 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T00:32:24,437 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1886d2b5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T00:32:24,437 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@316f1edc{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T00:32:24,437 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T00:32:24,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2b9deb82{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T00:32:24,438 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@37338c92{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,STOPPED}
2024-12-07T00:32:24,439 WARN [BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T00:32:24,439 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T00:32:24,439 WARN [BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2074794187-172.17.0.2-1733530959153 (Datanode Uuid deaefd16-9ff7-42f6-95fa-e2f923401bf9) service to localhost/127.0.0.1:46157
2024-12-07T00:32:24,439 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T00:32:24,440 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data3/current/BP-2074794187-172.17.0.2-1733530959153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:32:24,440 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data4/current/BP-2074794187-172.17.0.2-1733530959153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:32:24,441 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T00:32:24,444 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3d2c3e29{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-12-07T00:32:24,444 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1afb4ca2{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T00:32:24,444 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T00:32:24,445 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a0dab5a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T00:32:24,445 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49de3167{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,STOPPED}
2024-12-07T00:32:24,446 WARN [BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-12-07T00:32:24,446 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-12-07T00:32:24,446 WARN [BP-2074794187-172.17.0.2-1733530959153 heartbeating to localhost/127.0.0.1:46157 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2074794187-172.17.0.2-1733530959153 (Datanode Uuid 59cc2ab6-abd6-4e0b-8943-d15253b3223d) service to localhost/127.0.0.1:46157
2024-12-07T00:32:24,446 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-12-07T00:32:24,447 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data1/current/BP-2074794187-172.17.0.2-1733530959153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:32:24,447 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/cluster_608f69f7-ff75-cb88-fa14-12fb88230625/dfs/data/data2/current/BP-2074794187-172.17.0.2-1733530959153 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-12-07T00:32:24,447 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-12-07T00:32:24,454 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5da2d515{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs}
2024-12-07T00:32:24,454 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@69dc1403{HTTP/1.1, (http/1.1)}{localhost:0}
2024-12-07T00:32:24,454 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-12-07T00:32:24,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70357eda{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-12-07T00:32:24,455 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@744df411{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_branch-2/hbase-mapreduce/target/test-data/2bda5003-ffda-4a0b-5640-704e4d0643a6/hadoop.log.dir/,STOPPED}
2024-12-07T00:32:24,468 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(345): Shutdown MiniZK cluster with all ZK servers
2024-12-07T00:32:24,722 INFO [Time-limited test {}] hbase.HBaseTestingUtility(1347): Minicluster is down