2024-11-28 00:24:57,935 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-11-28 00:24:57,952 main DEBUG Took 0.013889 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-28 00:24:57,952 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-28 00:24:57,953 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-28 00:24:57,954 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-28 00:24:57,955 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:57,964 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-28 00:24:57,999 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,001 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,002 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,002 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,003 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,003 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,006 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,006 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,007 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,007 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,008 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,009 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,009 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,010 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-28 00:24:58,010 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,011 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,011 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,012 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,012 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,013 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,013 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,014 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,014 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-28 00:24:58,017 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,017 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-28 00:24:58,019 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-28 00:24:58,020 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-28 00:24:58,023 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-28 00:24:58,023 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
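The LoggerConfig builders recorded above are driven by the test's log4j2.properties, loaded as a PropertiesConfiguration. Below is a minimal sketch of what that file could look like, reconstructed only from the logger names, levels and the rootLogger levelAndRefs printed in this log; the property key prefixes (logger.zk, logger.hadoop, ...) are illustrative assumptions, not the real keys, and the Console appender definition is omitted here.

# Hypothetical log4j2.properties reconstruction based on the builders logged above.
# Key prefixes are assumed; levels and logger names are taken from the log.
rootLogger = INFO,Console

logger.zk.name = org.apache.zookeeper
logger.zk.level = ERROR

logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN

logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG

logger.directory.name = org.apache.directory
logger.directory.level = WARN
logger.directory.additivity = false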
2024-11-28 00:24:58,025 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-28 00:24:58,026 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-28 00:24:58,038 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-28 00:24:58,042 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-28 00:24:58,044 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-28 00:24:58,044 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-28 00:24:58,045 main DEBUG createAppenders(={Console}) 2024-11-28 00:24:58,046 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 initialized 2024-11-28 00:24:58,046 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 2024-11-28 00:24:58,047 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@57bc27f5 OK. 2024-11-28 00:24:58,048 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-28 00:24:58,048 main DEBUG OutputStream closed 2024-11-28 00:24:58,048 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-28 00:24:58,049 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-28 00:24:58,049 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@57cf54e1 OK 2024-11-28 00:24:58,185 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-28 00:24:58,188 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-28 00:24:58,189 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-28 00:24:58,191 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-28 00:24:58,191 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-28 00:24:58,192 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-28 00:24:58,192 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-28 00:24:58,193 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-28 00:24:58,193 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-28 00:24:58,194 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-28 00:24:58,194 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-28 00:24:58,194 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-28 00:24:58,195 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-28 00:24:58,195 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-28 00:24:58,196 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-28 00:24:58,196 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-28 00:24:58,197 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-28 00:24:58,198 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-28 00:24:58,201 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-28 00:24:58,202 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@1608bcbd) with optional ClassLoader: null 2024-11-28 00:24:58,203 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-28 00:24:58,204 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@1608bcbd] started OK. 2024-11-28T00:24:58,225 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestExportSnapshot timeout: 13 mins 2024-11-28 00:24:58,230 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-28 00:24:58,230 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-28T00:24:58,872 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1 2024-11-28T00:24:58,873 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobExportSnapshot timeout: 13 mins 2024-11-28T00:24:58,875 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.snapshot.TestMobSecureExportSnapshot timeout: 13 mins 2024-11-28T00:24:58,934 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... 
using builtin-java classes where applicable 2024-11-28T00:24:59,248 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=3, rsPorts=, rsClass=null, numDataNodes=3, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-28T00:24:59,272 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1, deleteOnExit=true 2024-11-28T00:24:59,272 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-28T00:24:59,274 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/test.cache.data in system properties and HBase conf 2024-11-28T00:24:59,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.tmp.dir in system properties and HBase conf 2024-11-28T00:24:59,275 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir in system properties and HBase conf 2024-11-28T00:24:59,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-28T00:24:59,276 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-28T00:24:59,277 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-28T00:24:59,399 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-28T00:24:59,414 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-28T00:24:59,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-28T00:24:59,428 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-28T00:24:59,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T00:24:59,429 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-28T00:24:59,430 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-28T00:24:59,434 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T00:24:59,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T00:24:59,435 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-28T00:24:59,438 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/nfs.dump.dir in system properties and HBase conf 2024-11-28T00:24:59,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir in system properties and HBase conf 2024-11-28T00:24:59,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T00:24:59,440 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-28T00:24:59,441 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-28T00:25:00,768 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-28T00:25:00,896 INFO [Time-limited test {}] log.Log(170): Logging initialized @4254ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-28T00:25:01,020 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:01,173 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:25:01,225 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:25:01,226 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:25:01,228 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-28T00:25:01,257 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:01,263 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@186b7ee9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:25:01,265 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@af8acfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T00:25:01,535 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5996a1b{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir/jetty-localhost-35791-hadoop-hdfs-3_4_1-tests_jar-_-any-12074501516462526283/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-28T00:25:01,544 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:35791} 2024-11-28T00:25:01,555 INFO [Time-limited test {}] server.Server(415): Started @4904ms 2024-11-28T00:25:02,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:02,126 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:25:02,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:25:02,129 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:25:02,129 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-28T00:25:02,132 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ab3224e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:25:02,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@726dcf3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T00:25:02,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@64986ec5{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir/jetty-localhost-46399-hadoop-hdfs-3_4_1-tests_jar-_-any-4794158634168452593/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:25:02,316 INFO [Time-limited test 
{}] server.AbstractConnector(333): Started ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:46399} 2024-11-28T00:25:02,316 INFO [Time-limited test {}] server.Server(415): Started @5676ms 2024-11-28T00:25:02,383 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-28T00:25:02,576 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:02,587 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:25:02,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:25:02,607 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:25:02,608 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:25:02,612 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16f22913{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:25:02,613 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@76645274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T00:25:02,812 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6b9a68a9{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir/jetty-localhost-39963-hadoop-hdfs-3_4_1-tests_jar-_-any-16741571491040226038/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:25:02,813 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:39963} 2024-11-28T00:25:02,814 INFO [Time-limited test {}] server.Server(415): Started @6173ms 2024-11-28T00:25:02,816 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-28T00:25:02,971 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:02,991 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:25:03,003 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:25:03,003 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:25:03,003 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-28T00:25:03,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6268d40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:25:03,005 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59eae107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-28T00:25:03,096 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1/current/BP-1095226328-172.17.0.2-1732753500430/current, will proceed with Du for space computation calculation, 2024-11-28T00:25:03,100 WARN [Thread-107 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2/current/BP-1095226328-172.17.0.2-1732753500430/current, will proceed with Du for space computation calculation, 2024-11-28T00:25:03,100 WARN [Thread-106 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3/current/BP-1095226328-172.17.0.2-1732753500430/current, will proceed with Du for space computation calculation, 2024-11-28T00:25:03,102 WARN [Thread-108 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4/current/BP-1095226328-172.17.0.2-1732753500430/current, will proceed with Du for space computation calculation, 2024-11-28T00:25:03,223 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@367f3488{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir/jetty-localhost-37047-hadoop-hdfs-3_4_1-tests_jar-_-any-6156943951819397734/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:25:03,226 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1c0d7560{HTTP/1.1, 
(http/1.1)}{localhost:37047} 2024-11-28T00:25:03,226 INFO [Time-limited test {}] server.Server(415): Started @6586ms 2024-11-28T00:25:03,229 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-28T00:25:03,271 WARN [Thread-82 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-28T00:25:03,289 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-28T00:25:03,414 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2a6ae448d9baf20 with lease ID 0x73129b206c68d546: Processing first storage report for DS-d18dbf5e-0d5c-45b4-8b37-1e2e7919ab8e from datanode DatanodeRegistration(127.0.0.1:43159, datanodeUuid=8fbe14a4-867b-4ed5-8ffb-59d9026469be, infoPort=42613, infoSecurePort=0, ipcPort=43091, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430) 2024-11-28T00:25:03,415 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2a6ae448d9baf20 with lease ID 0x73129b206c68d546: from storage DS-d18dbf5e-0d5c-45b4-8b37-1e2e7919ab8e node DatanodeRegistration(127.0.0.1:43159, datanodeUuid=8fbe14a4-867b-4ed5-8ffb-59d9026469be, infoPort=42613, infoSecurePort=0, ipcPort=43091, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-28T00:25:03,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbccb3f9bef863604 with lease ID 0x73129b206c68d545: Processing first storage report for DS-fe60c522-f41b-4d88-9ac2-209cfed0f304 from datanode DatanodeRegistration(127.0.0.1:45909, datanodeUuid=79b08128-f8b3-4b4e-b480-34b9004049c2, infoPort=36679, infoSecurePort=0, ipcPort=44599, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430) 2024-11-28T00:25:03,416 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbccb3f9bef863604 with lease ID 0x73129b206c68d545: from storage DS-fe60c522-f41b-4d88-9ac2-209cfed0f304 node DatanodeRegistration(127.0.0.1:45909, datanodeUuid=79b08128-f8b3-4b4e-b480-34b9004049c2, infoPort=36679, infoSecurePort=0, ipcPort=44599, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-28T00:25:03,416 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc2a6ae448d9baf20 with lease ID 0x73129b206c68d546: Processing first storage report for DS-3ed92f4f-460b-4b9a-8dee-5f1145eb52ac from datanode DatanodeRegistration(127.0.0.1:43159, datanodeUuid=8fbe14a4-867b-4ed5-8ffb-59d9026469be, infoPort=42613, infoSecurePort=0, ipcPort=43091, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430) 2024-11-28T00:25:03,417 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc2a6ae448d9baf20 with lease ID 0x73129b206c68d546: from storage DS-3ed92f4f-460b-4b9a-8dee-5f1145eb52ac node DatanodeRegistration(127.0.0.1:43159, datanodeUuid=8fbe14a4-867b-4ed5-8ffb-59d9026469be, infoPort=42613, infoSecurePort=0, ipcPort=43091, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430), blocks: 0, hasStaleStorage: false, 
processing time: 0 msecs, invalidatedBlocks: 0 2024-11-28T00:25:03,417 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xbccb3f9bef863604 with lease ID 0x73129b206c68d545: Processing first storage report for DS-8cf9dce7-aca7-47cb-9464-5781dc18bd0a from datanode DatanodeRegistration(127.0.0.1:45909, datanodeUuid=79b08128-f8b3-4b4e-b480-34b9004049c2, infoPort=36679, infoSecurePort=0, ipcPort=44599, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430) 2024-11-28T00:25:03,417 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xbccb3f9bef863604 with lease ID 0x73129b206c68d545: from storage DS-8cf9dce7-aca7-47cb-9464-5781dc18bd0a node DatanodeRegistration(127.0.0.1:45909, datanodeUuid=79b08128-f8b3-4b4e-b480-34b9004049c2, infoPort=36679, infoSecurePort=0, ipcPort=44599, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-28T00:25:03,567 WARN [Thread-139 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5/current/BP-1095226328-172.17.0.2-1732753500430/current, will proceed with Du for space computation calculation, 2024-11-28T00:25:03,584 WARN [Thread-140 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6/current/BP-1095226328-172.17.0.2-1732753500430/current, will proceed with Du for space computation calculation, 2024-11-28T00:25:03,693 WARN [Thread-129 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-28T00:25:03,718 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x147296727af7eda7 with lease ID 0x73129b206c68d547: Processing first storage report for DS-8b96cc8c-61ab-4969-a00a-3e67bb0de7f6 from datanode DatanodeRegistration(127.0.0.1:39705, datanodeUuid=90ba7e90-1881-4c45-a690-64f6ef41cebd, infoPort=35157, infoSecurePort=0, ipcPort=46339, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430) 2024-11-28T00:25:03,718 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x147296727af7eda7 with lease ID 0x73129b206c68d547: from storage DS-8b96cc8c-61ab-4969-a00a-3e67bb0de7f6 node DatanodeRegistration(127.0.0.1:39705, datanodeUuid=90ba7e90-1881-4c45-a690-64f6ef41cebd, infoPort=35157, infoSecurePort=0, ipcPort=46339, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-28T00:25:03,719 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x147296727af7eda7 with lease ID 0x73129b206c68d547: Processing first storage report for DS-3f5370e6-ba71-47c9-bdaa-6f6e44d8a2bc from datanode DatanodeRegistration(127.0.0.1:39705, datanodeUuid=90ba7e90-1881-4c45-a690-64f6ef41cebd, infoPort=35157, infoSecurePort=0, ipcPort=46339, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430) 2024-11-28T00:25:03,719 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x147296727af7eda7 with lease ID 0x73129b206c68d547: from storage DS-3f5370e6-ba71-47c9-bdaa-6f6e44d8a2bc node DatanodeRegistration(127.0.0.1:39705, datanodeUuid=90ba7e90-1881-4c45-a690-64f6ef41cebd, infoPort=35157, infoSecurePort=0, ipcPort=46339, storageInfo=lv=-57;cid=testClusterID;nsid=1459040673;c=1732753500430), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-28T00:25:03,951 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1 2024-11-28T00:25:04,085 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/zookeeper_0, clientPort=64116, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-28T00:25:04,101 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=64116 2024-11-28T00:25:04,118 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 
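The cluster bring-up recorded above (DFS datanodes, the MiniZooKeeperCluster on client port 64116, then HBase itself) is what HBaseTestingUtil.startMiniCluster drives. The following is a minimal sketch of a test doing the same, assuming the StartMiniClusterOption values reported at the start of this log; the class and builder names follow the standard HBase testing-util API, while the surrounding scaffolding is illustrative only.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors StartMiniClusterOption{numMasters=1, numRegionServers=3,
    // numDataNodes=3, numZkServers=1} reported earlier in this log.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(3)
        .numDataNodes(3)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // starts DFS, ZooKeeper and HBase, as logged above
    try {
      // test logic would go here, e.g. against util.getConnection()
    } finally {
      util.shutdownMiniCluster();    // tears the cluster down and removes the temp dirs
    }
  }
}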
2024-11-28T00:25:04,123 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:04,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741825_1001 (size=7) 2024-11-28T00:25:04,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741825_1001 (size=7) 2024-11-28T00:25:04,497 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741825_1001 (size=7) 2024-11-28T00:25:04,890 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a with version=8 2024-11-28T00:25:04,890 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/hbase-staging 2024-11-28T00:25:05,000 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-28T00:25:05,263 INFO [Time-limited test {}] client.ConnectionUtils(128): master/72c8c7f04009:0 server-side Connection retries=45 2024-11-28T00:25:05,274 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:05,275 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:05,279 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T00:25:05,280 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:05,280 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T00:25:05,452 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-28T00:25:05,550 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-28T00:25:05,566 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-28T00:25:05,571 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T00:25:05,611 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 12016 (auto-detected) 2024-11-28T00:25:05,613 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-28T00:25:05,640 INFO [Time-limited test {}] 
ipc.NettyRpcServer(191): Bind to /172.17.0.2:39673 2024-11-28T00:25:05,675 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39673 connecting to ZooKeeper ensemble=127.0.0.1:64116 2024-11-28T00:25:05,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:396730x0, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T00:25:05,729 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39673-0x1003b83f01b0000 connected 2024-11-28T00:25:05,793 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:05,798 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:05,817 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:25:05,821 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a, hbase.cluster.distributed=false 2024-11-28T00:25:05,872 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T00:25:05,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39673 2024-11-28T00:25:05,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39673 2024-11-28T00:25:05,911 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39673 2024-11-28T00:25:05,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39673 2024-11-28T00:25:05,913 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39673 2024-11-28T00:25:06,058 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/72c8c7f04009:0 server-side Connection retries=45 2024-11-28T00:25:06,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,061 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,062 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T00:25:06,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,062 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated 
metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T00:25:06,067 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T00:25:06,070 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T00:25:06,076 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43335 2024-11-28T00:25:06,079 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43335 connecting to ZooKeeper ensemble=127.0.0.1:64116 2024-11-28T00:25:06,081 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:06,086 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:06,139 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:433350x0, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:25:06,136 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:433350x0, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T00:25:06,145 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-28T00:25:06,152 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43335-0x1003b83f01b0001 connected 2024-11-28T00:25:06,155 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-28T00:25:06,158 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T00:25:06,165 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T00:25:06,171 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43335 2024-11-28T00:25:06,172 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43335 2024-11-28T00:25:06,176 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43335 2024-11-28T00:25:06,177 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43335 2024-11-28T00:25:06,180 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43335 2024-11-28T00:25:06,208 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/72c8c7f04009:0 server-side Connection retries=45 2024-11-28T00:25:06,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): 
Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,208 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,209 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T00:25:06,209 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,237 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T00:25:06,238 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T00:25:06,239 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T00:25:06,262 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41621 2024-11-28T00:25:06,266 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41621 connecting to ZooKeeper ensemble=127.0.0.1:64116 2024-11-28T00:25:06,269 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:06,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:06,285 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:416210x0, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:25:06,286 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-28T00:25:06,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:416210x0, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-28T00:25:06,289 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-28T00:25:06,290 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41621-0x1003b83f01b0002 connected 2024-11-28T00:25:06,293 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T00:25:06,295 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T00:25:06,296 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41621 2024-11-28T00:25:06,296 DEBUG 
[Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41621 2024-11-28T00:25:06,297 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41621 2024-11-28T00:25:06,304 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41621 2024-11-28T00:25:06,305 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41621 2024-11-28T00:25:06,332 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/72c8c7f04009:0 server-side Connection retries=45 2024-11-28T00:25:06,332 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,333 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-28T00:25:06,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-28T00:25:06,333 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-28T00:25:06,334 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-28T00:25:06,334 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-28T00:25:06,337 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36427 2024-11-28T00:25:06,339 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36427 connecting to ZooKeeper ensemble=127.0.0.1:64116 2024-11-28T00:25:06,341 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:06,344 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:06,355 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364270x0, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:25:06,356 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-28T00:25:06,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:364270x0, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 
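The repeated "Set watcher on znode that does not yet exist" lines above correspond to an exists-with-watch call: the watch is registered even though the node is absent, so the later NodeCreated event for /hbase/master fires once the active master registers. Below is a minimal sketch of that pattern using the plain ZooKeeper client API (HBase's ZKUtil and ZKWatcher wrap this); the connect string and znode paths are the ones from this log, and the surrounding code is illustrative.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class WatchSketch {
  public static void main(String[] args) throws Exception {
    // Connect to the mini ZooKeeper cluster started earlier in this log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:64116", 30000, new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // e.g. type=NodeCreated, path=/hbase/master once the active master registers
        System.out.println("event: " + event.getType() + " " + event.getPath());
      }
    });
    // Registers a watch even if the znode does not exist yet
    // ("Set watcher on znode that does not yet exist" in the log above).
    zk.exists("/hbase/master", true);
    zk.exists("/hbase/running", true);
    Thread.sleep(5000);   // in this sketch, just wait briefly for events
    zk.close();
  }
}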
2024-11-28T00:25:06,367 INFO [Time-limited test {}] mob.MobFileCache(128): MobFileCache disabled 2024-11-28T00:25:06,372 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364270x0, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-28T00:25:06,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:364270x0, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-28T00:25:06,381 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36427-0x1003b83f01b0003 connected 2024-11-28T00:25:06,385 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36427 2024-11-28T00:25:06,387 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36427 2024-11-28T00:25:06,396 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36427 2024-11-28T00:25:06,400 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36427 2024-11-28T00:25:06,403 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36427 2024-11-28T00:25:06,427 DEBUG [M:0;72c8c7f04009:39673 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;72c8c7f04009:39673 2024-11-28T00:25:06,428 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/72c8c7f04009,39673,1732753505055 2024-11-28T00:25:06,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:25:06,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:25:06,438 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:25:06,439 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:25:06,445 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/72c8c7f04009,39673,1732753505055 2024-11-28T00:25:06,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T00:25:06,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase 
Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T00:25:06,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:06,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:06,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-28T00:25:06,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:06,478 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:06,480 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-28T00:25:06,484 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/72c8c7f04009,39673,1732753505055 from backup master directory 2024-11-28T00:25:06,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:25:06,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/72c8c7f04009,39673,1732753505055 2024-11-28T00:25:06,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:25:06,488 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:25:06,489 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-28T00:25:06,489 WARN [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-28T00:25:06,490 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=72c8c7f04009,39673,1732753505055 2024-11-28T00:25:06,493 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-28T00:25:06,495 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-28T00:25:06,567 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/hbase.id] with ID: 0971976f-020a-48db-8700-e23062cd273e 2024-11-28T00:25:06,567 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.tmp/hbase.id 2024-11-28T00:25:06,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741826_1002 (size=42) 2024-11-28T00:25:06,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741826_1002 (size=42) 2024-11-28T00:25:06,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741826_1002 (size=42) 2024-11-28T00:25:06,602 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.tmp/hbase.id]:[hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/hbase.id] 2024-11-28T00:25:06,653 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:06,660 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-28T00:25:06,686 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 24ms. 
2024-11-28T00:25:06,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:06,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:06,691 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:06,692 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:06,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741827_1003 (size=196) 2024-11-28T00:25:06,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741827_1003 (size=196) 2024-11-28T00:25:06,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741827_1003 (size=196) 2024-11-28T00:25:06,749 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:25:06,751 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-28T00:25:06,772 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(244): No decryptEncryptedDataEncryptionKey method in DFSClient, should be hadoop version with HDFS-12396 java.lang.NoSuchMethodException: 
org.apache.hadoop.hdfs.DFSClient.decryptEncryptedDataEncryptionKey(org.apache.hadoop.fs.FileEncryptionInfo)
    at java.lang.Class.getDeclaredMethod(Class.java:2675) ~[?:?]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelperWithoutHDFS12396(FanOutOneBlockAsyncDFSOutputSaslHelper.java:183) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.createTransparentCryptoHelper(FanOutOneBlockAsyncDFSOutputSaslHelper.java:242) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper.<clinit>(FanOutOneBlockAsyncDFSOutputSaslHelper.java:253) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Class.forName0(Native Method) ~[?:?]
    at java.lang.Class.forName(Class.java:375) ~[?:?]
    at org.apache.hadoop.hbase.wal.AsyncFSWALProvider.load(AsyncFSWALProvider.java:150) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.getProviderClass(WALFactory.java:174) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:262) ~[classes/:?]
    at org.apache.hadoop.hbase.wal.WALFactory.<init>(WALFactory.java:231) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegion.create(MasterRegion.java:400) ~[classes/:?]
    at org.apache.hadoop.hbase.master.region.MasterRegionFactory.create(MasterRegionFactory.java:135) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.finishActiveMasterInitialization(HMaster.java:1003) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.startActiveMasterManager(HMaster.java:2535) ~[classes/:?]
    at org.apache.hadoop.hbase.master.HMaster.lambda$run$0(HMaster.java:613) ~[classes/:?]
    at org.apache.hadoop.hbase.trace.TraceUtil.lambda$tracedRunnable$2(TraceUtil.java:155) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T00:25:06,778 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:25:06,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741828_1004 (size=1189) 2024-11-28T00:25:06,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741828_1004 (size=1189) 2024-11-28T00:25:06,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741828_1004 (size=1189) 2024-11-28T00:25:06,862 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/data/master/store 2024-11-28T00:25:06,881 WARN [IPC Server handler 4 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:06,881 WARN [IPC Server handler 4 on default port 43901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:06,882 WARN [IPC Server handler 4 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 
(unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:06,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741829_1005 (size=34) 2024-11-28T00:25:06,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741829_1005 (size=34) 2024-11-28T00:25:06,909 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-28T00:25:06,914 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:06,916 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-28T00:25:06,916 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:25:06,916 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:25:06,918 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-28T00:25:06,918 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:25:06,919 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-28T00:25:06,920 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732753506916Disabling compacts and flushes for region at 1732753506916Disabling writes for close at 1732753506918 (+2 ms)Writing region close event to WAL at 1732753506919 (+1 ms)Closed at 1732753506919 2024-11-28T00:25:06,924 WARN [master/72c8c7f04009:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/data/master/store/.initializing 2024-11-28T00:25:06,925 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055 2024-11-28T00:25:06,936 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T00:25:06,956 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=72c8c7f04009%2C39673%2C1732753505055, suffix=, logDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055, archiveDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/oldWALs, maxLogs=10 2024-11-28T00:25:06,983 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055/72c8c7f04009%2C39673%2C1732753505055.1732753506962, exclude list is [], retry=0 2024-11-28T00:25:06,988 WARN [IPC Server handler 3 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:06,988 WARN [IPC Server handler 3 on default port 43901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:06,989 WARN [IPC Server handler 3 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:07,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:43159,DS-d18dbf5e-0d5c-45b4-8b37-1e2e7919ab8e,DISK] 2024-11-28T00:25:07,013 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45909,DS-fe60c522-f41b-4d88-9ac2-209cfed0f304,DISK] 2024-11-28T00:25:07,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.ProtobufDecoder(117): Hadoop 3.3 and above shades protobuf. 2024-11-28T00:25:07,070 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055/72c8c7f04009%2C39673%2C1732753505055.1732753506962 2024-11-28T00:25:07,071 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36679:36679),(127.0.0.1/127.0.0.1:42613:42613)] 2024-11-28T00:25:07,072 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:25:07,073 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:07,078 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,079 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,134 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,175 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-28T00:25:07,180 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:25:07,185 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-28T00:25:07,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:07,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,194 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-28T00:25:07,195 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:07,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,201 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-28T00:25:07,201 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,202 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:07,203 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,208 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,209 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,215 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,216 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,220 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-28T00:25:07,225 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-28T00:25:07,231 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:25:07,232 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=69580300, jitterRate=0.036827266216278076}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T00:25:07,240 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732753507103Initializing all the Stores at 1732753507106 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732753507107 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753507108 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753507108Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753507109 (+1 ms)Cleaning up temporary data from old regions at 1732753507216 (+107 ms)Region opened successfully at 1732753507240 (+24 ms) 2024-11-28T00:25:07,243 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-28T00:25:07,284 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@26a56495, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=72c8c7f04009/172.17.0.2:0 2024-11-28T00:25:07,320 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-28T00:25:07,334 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-28T00:25:07,335 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-28T00:25:07,338 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-28T00:25:07,339 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-28T00:25:07,344 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-28T00:25:07,345 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-28T00:25:07,372 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-28T00:25:07,383 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-28T00:25:07,385 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-28T00:25:07,389 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-28T00:25:07,391 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-28T00:25:07,393 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-28T00:25:07,395 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-28T00:25:07,399 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-28T00:25:07,401 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-28T00:25:07,403 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-28T00:25:07,404 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-28T00:25:07,427 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-28T00:25:07,429 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-28T00:25:07,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T00:25:07,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T00:25:07,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T00:25:07,434 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-28T00:25:07,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,435 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,439 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=72c8c7f04009,39673,1732753505055, sessionid=0x1003b83f01b0000, setting cluster-up flag (Was=false) 2024-11-28T00:25:07,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,452 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 
2024-11-28T00:25:07,460 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-28T00:25:07,462 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=72c8c7f04009,39673,1732753505055 2024-11-28T00:25:07,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,467 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:07,475 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-28T00:25:07,477 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=72c8c7f04009,39673,1732753505055 2024-11-28T00:25:07,485 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-28T00:25:07,508 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(746): ClusterId : 0971976f-020a-48db-8700-e23062cd273e 2024-11-28T00:25:07,508 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(746): ClusterId : 0971976f-020a-48db-8700-e23062cd273e 2024-11-28T00:25:07,510 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(746): ClusterId : 0971976f-020a-48db-8700-e23062cd273e 2024-11-28T00:25:07,511 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-28T00:25:07,511 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-28T00:25:07,511 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-28T00:25:07,518 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-28T00:25:07,518 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-28T00:25:07,518 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-28T00:25:07,518 DEBUG [RS:2;72c8c7f04009:36427 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-28T00:25:07,518 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-28T00:25:07,518 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-28T00:25:07,522 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-28T00:25:07,522 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-28T00:25:07,523 DEBUG [RS:0;72c8c7f04009:43335 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78b88a96, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=72c8c7f04009/172.17.0.2:0 2024-11-28T00:25:07,523 DEBUG [RS:2;72c8c7f04009:36427 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ba6421f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=72c8c7f04009/172.17.0.2:0 2024-11-28T00:25:07,527 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-28T00:25:07,527 DEBUG [RS:1;72c8c7f04009:41621 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cdb510b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=72c8c7f04009/172.17.0.2:0 2024-11-28T00:25:07,531 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] master.HMaster(3441): Registered master coprocessor service: service=AccessControlService 2024-11-28T00:25:07,541 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:25:07,542 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver loaded, priority=536870912. 
2024-11-28T00:25:07,545 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;72c8c7f04009:43335 2024-11-28T00:25:07,547 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:2;72c8c7f04009:36427 2024-11-28T00:25:07,550 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-28T00:25:07,550 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-28T00:25:07,550 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-28T00:25:07,550 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-28T00:25:07,550 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-28T00:25:07,550 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-28T00:25:07,551 INFO [RS:0;72c8c7f04009:43335 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:25:07,551 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-28T00:25:07,551 INFO [RS:2;72c8c7f04009:36427 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:25:07,551 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-28T00:25:07,555 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=43335, startcode=1732753506004 2024-11-28T00:25:07,556 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=36427, startcode=1732753506332 2024-11-28T00:25:07,576 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;72c8c7f04009:41621 2024-11-28T00:25:07,576 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-28T00:25:07,576 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-28T00:25:07,576 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(660): Registered regionserver coprocessor executorService: executorService=AccessControlService 2024-11-28T00:25:07,577 INFO [RS:1;72c8c7f04009:41621 {}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:25:07,577 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-28T00:25:07,580 DEBUG [RS:0;72c8c7f04009:43335 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T00:25:07,580 DEBUG [RS:2;72c8c7f04009:36427 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T00:25:07,585 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=41621, startcode=1732753506207 2024-11-28T00:25:07,586 DEBUG [RS:1;72c8c7f04009:41621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-28T00:25:07,642 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-28T00:25:07,659 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38929, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T00:25:07,660 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33133, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T00:25:07,662 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59599, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-28T00:25:07,667 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-28T00:25:07,671 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-28T00:25:07,680 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-28T00:25:07,681 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-28T00:25:07,705 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-28T00:25:07,708 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-28T00:25:07,709 WARN [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-28T00:25:07,709 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-28T00:25:07,709 WARN [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 2024-11-28T00:25:07,709 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-28T00:25:07,710 WARN [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 100 ms and then retrying. 
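Editor's note: the reportForDuty failures above are expected while the master is still initializing; each region server backs off and retries (100 ms here, 200 ms in later entries) until MasterRpcServices stops throwing ServerNotRunningYetException. A minimal, hypothetical Java sketch of that grow-the-sleep retry pattern, not the actual HRegionServer internals (the doubling step and cap are illustrative assumptions):

    import java.util.concurrent.Callable;
    import java.util.concurrent.TimeUnit;

    final class ReportForDutyRetrySketch {
        // Retry a registration call, lengthening the pause between attempts
        // (100 ms, then 200 ms, ...) up to a cap, as the log entries suggest.
        static <T> T retryWithBackoff(Callable<T> call, long initialSleepMs, long maxSleepMs)
                throws Exception {
            long sleepMs = initialSleepMs;
            while (true) {
                try {
                    return call.call();
                } catch (Exception serverNotRunningYet) {
                    // Master is not up yet; wait and try again with a longer pause.
                    TimeUnit.MILLISECONDS.sleep(sleepMs);
                    sleepMs = Math.min(sleepMs * 2, maxSleepMs);
                }
            }
        }
    }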
2024-11-28T00:25:07,714 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 72c8c7f04009,39673,1732753505055 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-28T00:25:07,724 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/72c8c7f04009:0, corePoolSize=5, maxPoolSize=5 2024-11-28T00:25:07,724 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/72c8c7f04009:0, corePoolSize=5, maxPoolSize=5 2024-11-28T00:25:07,724 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/72c8c7f04009:0, corePoolSize=5, maxPoolSize=5 2024-11-28T00:25:07,724 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/72c8c7f04009:0, corePoolSize=5, maxPoolSize=5 2024-11-28T00:25:07,725 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/72c8c7f04009:0, corePoolSize=10, maxPoolSize=10 2024-11-28T00:25:07,725 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:07,725 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/72c8c7f04009:0, corePoolSize=2, maxPoolSize=2 2024-11-28T00:25:07,725 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:07,739 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-28T00:25:07,740 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-28T00:25:07,748 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,748 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-28T00:25:07,756 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732753537756 2024-11-28T00:25:07,757 WARN [IPC Server handler 2 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:07,757 WARN [IPC Server handler 2 on default port 43901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:07,757 WARN [IPC Server handler 2 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:07,758 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-28T00:25:07,759 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-28T00:25:07,763 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-28T00:25:07,763 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-28T00:25:07,763 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-28T00:25:07,763 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-28T00:25:07,767 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] 
hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:07,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741831_1007 (size=1321) 2024-11-28T00:25:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741831_1007 (size=1321) 2024-11-28T00:25:07,783 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-28T00:25:07,784 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:07,792 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-28T00:25:07,793 WARN [IPC Server handler 1 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:07,793 WARN [IPC Server handler 1 on default port 43901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:07,793 WARN 
[IPC Server handler 1 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:07,794 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-28T00:25:07,795 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-28T00:25:07,800 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-28T00:25:07,800 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-28T00:25:07,810 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=41621, startcode=1732753506207 2024-11-28T00:25:07,810 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=43335, startcode=1732753506004 2024-11-28T00:25:07,811 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=36427, startcode=1732753506332 2024-11-28T00:25:07,812 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-28T00:25:07,813 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-28T00:25:07,813 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-28T00:25:07,813 WARN [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-28T00:25:07,814 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.ipc.ServerNotRunningYetException: Server is not running yet at org.apache.hadoop.hbase.master.HMaster.checkServiceStarted(HMaster.java:3334) ~[classes/:?] at org.apache.hadoop.hbase.master.MasterRpcServices.regionServerStartup(MasterRpcServices.java:667) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$2.callBlockingMethod(RegionServerStatusProtos.java:16714) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-28T00:25:07,814 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-28T00:25:07,814 WARN [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-28T00:25:07,816 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(2683): Master is not running yet 2024-11-28T00:25:07,816 WARN [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(841): reportForDuty failed; sleeping 200 ms and then retrying. 2024-11-28T00:25:07,820 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/72c8c7f04009:0:becomeActiveMaster-HFileCleaner.large.0-1732753507802,5,FailOnTimeoutGroup] 2024-11-28T00:25:07,831 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/72c8c7f04009:0:becomeActiveMaster-HFileCleaner.small.0-1732753507820,5,FailOnTimeoutGroup] 2024-11-28T00:25:07,832 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:07,832 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 
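Editor's note: a few entries up, the BOOTSTRAP step writes the hbase:meta table descriptor (families info, ns, rep_barrier, table; ROW_INDEX_V1 encoding, ROWCOL bloom filters, in-memory). hbase:meta itself is built internally, but for readers following along, a hedged sketch of assembling one such family with the public hbase-client builders (assumes the hbase-client artifact on the classpath; the table name is a placeholder, and the family settings simply mirror the 'info' family printed in the log):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
        public static TableDescriptor build() {
            // Mirrors the 'info' family from the log: VERSIONS=3, ROW_INDEX_V1,
            // ROWCOL bloom filter, IN_MEMORY=true, BLOCKSIZE=8192.
            return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                    .setMaxVersions(3)
                    .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
                    .setBloomFilterType(BloomType.ROWCOL)
                    .setInMemory(true)
                    .setBlocksize(8192)
                    .build())
                .build();
        }
    }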
2024-11-28T00:25:07,833 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741832_1008 (size=32) 2024-11-28T00:25:07,834 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:07,834 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:07,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741832_1008 (size=32) 2024-11-28T00:25:07,837 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:07,840 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T00:25:07,842 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T00:25:07,842 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:25:07,844 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-28T00:25:07,847 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-28T00:25:07,847 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:25:07,848 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T00:25:07,851 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T00:25:07,851 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:25:07,852 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T00:25:07,855 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T00:25:07,855 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:07,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): 
Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:25:07,856 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-28T00:25:07,858 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740 2024-11-28T00:25:07,859 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740 2024-11-28T00:25:07,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-28T00:25:07,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-28T00:25:07,863 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-28T00:25:07,866 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-28T00:25:07,873 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:25:07,874 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66787940, jitterRate=-0.004782140254974365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T00:25:07,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732753507837Initializing all the Stores at 1732753507839 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732753507839Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732753507840 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753507840Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732753507840Cleaning up temporary data from old regions at 1732753507862 (+22 ms)Region opened successfully at 1732753507876 (+14 ms) 2024-11-28T00:25:07,876 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-28T00:25:07,877 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-28T00:25:07,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-28T00:25:07,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T00:25:07,877 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T00:25:07,878 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-28T00:25:07,878 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732753507876Disabling compacts and flushes for region at 1732753507876Disabling writes for close at 1732753507877 (+1 ms)Writing region close event to WAL at 1732753507878 (+1 ms)Closed at 1732753507878 2024-11-28T00:25:07,883 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-28T00:25:07,883 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-28T00:25:07,891 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-28T00:25:07,903 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-28T00:25:07,909 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-28T00:25:07,933 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:07,934 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:07,934 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 
storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:07,934 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:07,935 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:07,935 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:07,935 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:07,936 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:07,936 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:07,936 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and 
org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:07,936 WARN [RedundancyMonitor {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK, ARCHIVE], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:07,937 WARN [RedundancyMonitor {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=false) All required storage types are unavailable: unavailableStorages=[DISK, ARCHIVE], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:08,016 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=43335, startcode=1732753506004 2024-11-28T00:25:08,017 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=36427, startcode=1732753506332 2024-11-28T00:25:08,019 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(2659): reportForDuty to master=72c8c7f04009,39673,1732753505055 with port=41621, startcode=1732753506207 2024-11-28T00:25:08,019 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 72c8c7f04009,43335,1732753506004 2024-11-28T00:25:08,022 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] master.ServerManager(517): Registering regionserver=72c8c7f04009,43335,1732753506004 2024-11-28T00:25:08,041 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,041 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] master.ServerManager(517): Registering regionserver=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,042 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:08,042 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43901 2024-11-28T00:25:08,042 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-28T00:25:08,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 72c8c7f04009,41621,1732753506207 2024-11-28T00:25:08,045 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] master.ServerManager(517): Registering regionserver=72c8c7f04009,41621,1732753506207 2024-11-28T00:25:08,045 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:08,045 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43901 2024-11-28T00:25:08,045 DEBUG 
[RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-28T00:25:08,052 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:08,052 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43901 2024-11-28T00:25:08,052 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-28T00:25:08,055 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T00:25:08,056 DEBUG [RS:2;72c8c7f04009:36427 {}] zookeeper.ZKUtil(111): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,056 DEBUG [RS:0;72c8c7f04009:43335 {}] zookeeper.ZKUtil(111): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/72c8c7f04009,43335,1732753506004 2024-11-28T00:25:08,056 WARN [RS:0;72c8c7f04009:43335 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T00:25:08,056 WARN [RS:2;72c8c7f04009:36427 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-28T00:25:08,056 INFO [RS:0;72c8c7f04009:43335 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:25:08,056 INFO [RS:2;72c8c7f04009:36427 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:25:08,056 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,43335,1732753506004 2024-11-28T00:25:08,056 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,060 DEBUG [RS:1;72c8c7f04009:41621 {}] zookeeper.ZKUtil(111): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/72c8c7f04009,41621,1732753506207 2024-11-28T00:25:08,060 WARN [72c8c7f04009:39673 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-28T00:25:08,060 WARN [RS:1;72c8c7f04009:41621 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-28T00:25:08,060 INFO [RS:1;72c8c7f04009:41621 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:25:08,061 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,41621,1732753506207 2024-11-28T00:25:08,072 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [72c8c7f04009,36427,1732753506332] 2024-11-28T00:25:08,073 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [72c8c7f04009,43335,1732753506004] 2024-11-28T00:25:08,073 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [72c8c7f04009,41621,1732753506207] 2024-11-28T00:25:08,119 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-28T00:25:08,119 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-28T00:25:08,119 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-28T00:25:08,142 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-28T00:25:08,142 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-28T00:25:08,148 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-28T00:25:08,152 INFO [RS:0;72c8c7f04009:43335 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-28T00:25:08,152 INFO [RS:1;72c8c7f04009:41621 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-28T00:25:08,152 INFO [RS:2;72c8c7f04009:36427 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-28T00:25:08,152 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,152 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,152 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
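Editor's note: the MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark=836 M) is consistent with a low-water mark computed as a fraction of the global limit, about 0.95 here. The property name and fraction below are the commonly documented defaults, assumed rather than read from this run's configuration:

    public class MemStoreLowMarkCheck {
        public static void main(String[] args) {
            // Assumed default: hbase.regionserver.global.memstore.size.lower.limit = 0.95
            long globalLimitMb = 880;
            double lowerLimitFraction = 0.95;
            long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);
            System.out.println(lowMarkMb + " MB"); // prints 836 MB, matching the log line
        }
    }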
2024-11-28T00:25:08,154 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-28T00:25:08,154 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-28T00:25:08,154 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-28T00:25:08,162 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-28T00:25:08,162 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-28T00:25:08,163 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-28T00:25:08,163 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,163 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,163 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,164 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG 
[RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,164 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/72c8c7f04009:0, corePoolSize=2, maxPoolSize=2 2024-11-28T00:25:08,165 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/72c8c7f04009:0, corePoolSize=2, maxPoolSize=2 2024-11-28T00:25:08,165 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/72c8c7f04009:0, corePoolSize=2, maxPoolSize=2 2024-11-28T00:25:08,165 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/72c8c7f04009:0, 
corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,165 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,166 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:25:08,166 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,166 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/72c8c7f04009:0, corePoolSize=1, maxPoolSize=1 2024-11-28T00:25:08,166 DEBUG [RS:1;72c8c7f04009:41621 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/72c8c7f04009:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:25:08,166 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:25:08,166 DEBUG [RS:0;72c8c7f04009:43335 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/72c8c7f04009:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:25:08,166 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:25:08,166 DEBUG [RS:2;72c8c7f04009:36427 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/72c8c7f04009:0, corePoolSize=3, maxPoolSize=3 2024-11-28T00:25:08,188 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,188 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,189 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,189 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,189 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 
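Editor's note: the ChoreService lines above and below register fixed-period background tasks (CompactionChecker and MemstoreFlusherChore every 1,000 ms, ExecutorStatusChore every 60,000 ms, and so on). A minimal sketch of that scheduling pattern with the plain JDK scheduler, purely illustrative and not HBase's ChoreService implementation:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class ChoreLikeScheduler {
        public static void main(String[] args) {
            ScheduledExecutorService pool = Executors.newSingleThreadScheduledExecutor();
            // Periods mirror two of the chores in the log: 1,000 ms and 60,000 ms.
            pool.scheduleAtFixedRate(() -> System.out.println("compaction check"),
                0, 1_000, TimeUnit.MILLISECONDS);
            pool.scheduleAtFixedRate(() -> System.out.println("executor status"),
                0, 60_000, TimeUnit.MILLISECONDS);
        }
    }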
2024-11-28T00:25:08,189 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,41621,1732753506207-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T00:25:08,211 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,211 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,211 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,211 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,211 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,211 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,43335,1732753506004-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T00:25:08,222 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-28T00:25:08,229 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,229 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,229 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,229 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,229 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,229 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,36427,1732753506332-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T00:25:08,240 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,41621,1732753506207-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,240 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,241 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.Replication(171): 72c8c7f04009,41621,1732753506207 started 2024-11-28T00:25:08,253 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-28T00:25:08,254 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,43335,1732753506004-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,254 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-28T00:25:08,254 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.Replication(171): 72c8c7f04009,43335,1732753506004 started 2024-11-28T00:25:08,257 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-28T00:25:08,257 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,36427,1732753506332-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,257 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,258 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.Replication(171): 72c8c7f04009,36427,1732753506332 started 2024-11-28T00:25:08,269 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:08,270 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1482): Serving as 72c8c7f04009,41621,1732753506207, RpcServer on 72c8c7f04009/172.17.0.2:41621, sessionid=0x1003b83f01b0002 2024-11-28T00:25:08,271 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-28T00:25:08,271 DEBUG [RS:1;72c8c7f04009:41621 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 72c8c7f04009,41621,1732753506207 2024-11-28T00:25:08,271 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '72c8c7f04009,41621,1732753506207' 2024-11-28T00:25:08,272 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-28T00:25:08,273 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-28T00:25:08,275 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-28T00:25:08,275 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-28T00:25:08,275 DEBUG [RS:1;72c8c7f04009:41621 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 72c8c7f04009,41621,1732753506207 2024-11-28T00:25:08,275 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '72c8c7f04009,41621,1732753506207' 2024-11-28T00:25:08,275 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-28T00:25:08,280 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-28T00:25:08,282 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-28T00:25:08,283 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1482): Serving as 72c8c7f04009,36427,1732753506332, RpcServer on 72c8c7f04009/172.17.0.2:36427, sessionid=0x1003b83f01b0003 2024-11-28T00:25:08,283 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-28T00:25:08,283 DEBUG [RS:2;72c8c7f04009:36427 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,283 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '72c8c7f04009,36427,1732753506332' 2024-11-28T00:25:08,283 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-28T00:25:08,284 DEBUG [RS:1;72c8c7f04009:41621 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-28T00:25:08,284 INFO [RS:1;72c8c7f04009:41621 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-28T00:25:08,284 INFO [RS:1;72c8c7f04009:41621 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-28T00:25:08,284 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-28T00:25:08,285 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-28T00:25:08,285 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-28T00:25:08,285 DEBUG [RS:2;72c8c7f04009:36427 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,285 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '72c8c7f04009,36427,1732753506332' 2024-11-28T00:25:08,285 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-28T00:25:08,285 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-28T00:25:08,285 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1482): Serving as 72c8c7f04009,43335,1732753506004, RpcServer on 72c8c7f04009/172.17.0.2:43335, sessionid=0x1003b83f01b0001 2024-11-28T00:25:08,286 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-28T00:25:08,286 DEBUG [RS:0;72c8c7f04009:43335 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 72c8c7f04009,43335,1732753506004 2024-11-28T00:25:08,286 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-28T00:25:08,286 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '72c8c7f04009,43335,1732753506004' 2024-11-28T00:25:08,286 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-28T00:25:08,287 DEBUG [RS:2;72c8c7f04009:36427 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-28T00:25:08,287 INFO [RS:2;72c8c7f04009:36427 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-28T00:25:08,287 INFO [RS:2;72c8c7f04009:36427 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-28T00:25:08,287 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-28T00:25:08,288 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-28T00:25:08,288 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-28T00:25:08,288 DEBUG [RS:0;72c8c7f04009:43335 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 72c8c7f04009,43335,1732753506004 2024-11-28T00:25:08,288 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '72c8c7f04009,43335,1732753506004' 2024-11-28T00:25:08,288 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-28T00:25:08,293 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-28T00:25:08,294 DEBUG [RS:0;72c8c7f04009:43335 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-28T00:25:08,294 INFO [RS:0;72c8c7f04009:43335 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-28T00:25:08,294 INFO [RS:0;72c8c7f04009:43335 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-28T00:25:08,393 INFO [RS:1;72c8c7f04009:41621 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T00:25:08,393 INFO [RS:2;72c8c7f04009:36427 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T00:25:08,395 INFO [RS:0;72c8c7f04009:43335 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defaultMonitorName 2024-11-28T00:25:08,400 INFO [RS:1;72c8c7f04009:41621 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=72c8c7f04009%2C41621%2C1732753506207, suffix=, logDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,41621,1732753506207, archiveDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/oldWALs, maxLogs=32 2024-11-28T00:25:08,400 INFO [RS:2;72c8c7f04009:36427 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=72c8c7f04009%2C36427%2C1732753506332, suffix=, logDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,36427,1732753506332, archiveDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/oldWALs, maxLogs=32 2024-11-28T00:25:08,401 INFO [RS:0;72c8c7f04009:43335 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=72c8c7f04009%2C43335%2C1732753506004, suffix=, logDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,43335,1732753506004, archiveDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/oldWALs, maxLogs=32 2024-11-28T00:25:08,430 DEBUG [RS:1;72c8c7f04009:41621 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,41621,1732753506207/72c8c7f04009%2C41621%2C1732753506207.1732753508407, exclude list is [], retry=0 2024-11-28T00:25:08,430 DEBUG [RS:2;72c8c7f04009:36427 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,36427,1732753506332/72c8c7f04009%2C36427%2C1732753506332.1732753508405, exclude list is [], retry=0 2024-11-28T00:25:08,438 DEBUG [RS:0;72c8c7f04009:43335 {}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,43335,1732753506004/72c8c7f04009%2C43335%2C1732753506004.1732753508405, exclude list is [], retry=0 2024-11-28T00:25:08,440 WARN [IPC Server handler 4 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:08,440 WARN [IPC Server handler 4 on default port 43901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 
2024-11-28T00:25:08,440 WARN [IPC Server handler 4 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:08,442 WARN [IPC Server handler 2 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:08,442 WARN [IPC Server handler 2 on default port 43901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:08,442 WARN [IPC Server handler 2 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:08,448 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45909,DS-fe60c522-f41b-4d88-9ac2-209cfed0f304,DISK] 2024-11-28T00:25:08,450 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45909,DS-fe60c522-f41b-4d88-9ac2-209cfed0f304,DISK] 2024-11-28T00:25:08,452 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43159,DS-d18dbf5e-0d5c-45b4-8b37-1e2e7919ab8e,DISK] 2024-11-28T00:25:08,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43159,DS-d18dbf5e-0d5c-45b4-8b37-1e2e7919ab8e,DISK] 2024-11-28T00:25:08,453 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = 
DatanodeInfoWithStorage[127.0.0.1:43159,DS-d18dbf5e-0d5c-45b4-8b37-1e2e7919ab8e,DISK] 2024-11-28T00:25:08,454 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45909,DS-fe60c522-f41b-4d88-9ac2-209cfed0f304,DISK] 2024-11-28T00:25:08,464 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:39705,DS-8b96cc8c-61ab-4969-a00a-3e67bb0de7f6,DISK] 2024-11-28T00:25:08,486 INFO [RS:1;72c8c7f04009:41621 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,41621,1732753506207/72c8c7f04009%2C41621%2C1732753506207.1732753508407 2024-11-28T00:25:08,521 DEBUG [RS:1;72c8c7f04009:41621 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36679:36679),(127.0.0.1/127.0.0.1:42613:42613)] 2024-11-28T00:25:08,525 INFO [RS:0;72c8c7f04009:43335 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,43335,1732753506004/72c8c7f04009%2C43335%2C1732753506004.1732753508405 2024-11-28T00:25:08,526 DEBUG [RS:0;72c8c7f04009:43335 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:36679:36679),(127.0.0.1/127.0.0.1:42613:42613)] 2024-11-28T00:25:08,529 INFO [RS:2;72c8c7f04009:36427 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,36427,1732753506332/72c8c7f04009%2C36427%2C1732753506332.1732753508405 2024-11-28T00:25:08,531 DEBUG [RS:2;72c8c7f04009:36427 {}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: [(127.0.0.1/127.0.0.1:35157:35157),(127.0.0.1/127.0.0.1:42613:42613),(127.0.0.1/127.0.0.1:36679:36679)] 2024-11-28T00:25:08,563 DEBUG [72c8c7f04009:39673 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=3, allServersCount=3 2024-11-28T00:25:08,571 DEBUG [72c8c7f04009:39673 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:25:08,579 DEBUG [72c8c7f04009:39673 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:25:08,579 DEBUG [72c8c7f04009:39673 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:25:08,579 DEBUG [72c8c7f04009:39673 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:25:08,580 DEBUG [72c8c7f04009:39673 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:25:08,580 DEBUG [72c8c7f04009:39673 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:25:08,580 DEBUG [72c8c7f04009:39673 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:25:08,580 INFO [72c8c7f04009:39673 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:25:08,580 INFO [72c8c7f04009:39673 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:25:08,580 INFO [72c8c7f04009:39673 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:25:08,580 DEBUG [72c8c7f04009:39673 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
2024-11-28T00:25:08,592 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,605 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 72c8c7f04009,36427,1732753506332, state=OPENING 2024-11-28T00:25:08,612 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-28T00:25:08,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:08,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:08,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:08,616 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:08,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:25:08,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:25:08,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:25:08,617 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:25:08,620 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-28T00:25:08,623 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:25:08,803 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:25:08,806 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43833, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:25:08,817 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-28T00:25:08,817 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.AsyncFSWALProvider 2024-11-28T00:25:08,817 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 
{event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor .meta 2024-11-28T00:25:08,820 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=72c8c7f04009%2C36427%2C1732753506332.meta, suffix=.meta, logDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,36427,1732753506332, archiveDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/oldWALs, maxLogs=32 2024-11-28T00:25:08,837 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] asyncfs.FanOutOneBlockAsyncDFSOutputHelper(524): When create output stream for /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,36427,1732753506332/72c8c7f04009%2C36427%2C1732753506332.meta.1732753508822.meta, exclude list is [], retry=0 2024-11-28T00:25:08,840 WARN [IPC Server handler 1 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-28T00:25:08,840 WARN [IPC Server handler 1 on default port 43901 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=3, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-28T00:25:08,840 WARN [IPC Server handler 1 on default port 43901 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 3 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-28T00:25:08,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:45909,DS-fe60c522-f41b-4d88-9ac2-209cfed0f304,DISK] 2024-11-28T00:25:08,842 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] asyncfs.FanOutOneBlockAsyncDFSOutputSaslHelper(816): SASL client skipping handshake in unsecured configuration for addr = 127.0.0.1/127.0.0.1, datanodeId = DatanodeInfoWithStorage[127.0.0.1:43159,DS-d18dbf5e-0d5c-45b4-8b37-1e2e7919ab8e,DISK] 2024-11-28T00:25:08,846 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/WALs/72c8c7f04009,36427,1732753506332/72c8c7f04009%2C36427%2C1732753506332.meta.1732753508822.meta 2024-11-28T00:25:08,847 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new AsyncFSWAL writer with pipeline: 
[(127.0.0.1/127.0.0.1:36679:36679),(127.0.0.1/127.0.0.1:42613:42613)] 2024-11-28T00:25:08,847 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:25:08,849 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=AccessControlService 2024-11-28T00:25:08,850 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:25:08,851 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-28T00:25:08,854 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-28T00:25:08,855 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-28T00:25:08,864 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-28T00:25:08,864 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:08,864 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-28T00:25:08,864 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-28T00:25:08,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-28T00:25:08,869 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-28T00:25:08,869 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:08,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:25:08,871 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-28T00:25:08,872 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-28T00:25:08,872 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:08,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:25:08,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-28T00:25:08,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-28T00:25:08,874 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:08,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-28T00:25:08,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-28T00:25:08,877 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-28T00:25:08,877 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:08,878 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-28T00:25:08,878 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-28T00:25:08,879 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740 2024-11-28T00:25:08,882 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740 2024-11-28T00:25:08,885 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-28T00:25:08,885 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-28T00:25:08,886 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
2024-11-28T00:25:08,889 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-28T00:25:08,891 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59205789, jitterRate=-0.11776499450206757}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-28T00:25:08,891 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-28T00:25:08,896 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732753508865Writing region info on filesystem at 1732753508865Initializing all the Stores at 1732753508867 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732753508867Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732753508867Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753508867Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732753508868 (+1 ms)Cleaning up temporary data from old regions at 1732753508885 (+17 ms)Running coprocessor post-open hooks at 1732753508891 (+6 ms)Region opened successfully at 1732753508896 (+5 ms) 2024-11-28T00:25:08,904 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732753508794 2024-11-28T00:25:08,915 DEBUG [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-28T00:25:08,916 INFO [RS_OPEN_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-28T00:25:08,918 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,921 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 72c8c7f04009,36427,1732753506332, state=OPEN 2024-11-28T00:25:08,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:25:08,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:25:08,924 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:25:08,924 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:25:08,924 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:25:08,925 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-28T00:25:08,925 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:25:08,925 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-28T00:25:08,927 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:08,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-28T00:25:08,935 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=72c8c7f04009,36427,1732753506332 in 305 msec 2024-11-28T00:25:08,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-28T00:25:08,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.0450 sec 2024-11-28T00:25:08,944 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-28T00:25:08,944 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-28T00:25:08,973 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:08,974 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, 
hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:08,998 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:09,002 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52447, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:09,031 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.4790 sec 2024-11-28T00:25:09,032 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732753509031, completionTime=-1 2024-11-28T00:25:09,036 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=3; waited=0ms, expected min=3 server(s), max=3 server(s), master is running 2024-11-28T00:25:09,036 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-28T00:25:09,078 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=3 2024-11-28T00:25:09,078 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732753569078 2024-11-28T00:25:09,078 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732753629078 2024-11-28T00:25:09,078 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 42 msec 2024-11-28T00:25:09,080 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 1588230740 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:25:09,098 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,39673,1732753505055-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:09,098 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,39673,1732753505055-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:09,098 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,39673,1732753505055-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:09,101 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-72c8c7f04009:39673, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:09,101 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:09,108 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 
2024-11-28T00:25:09,117 DEBUG [master/72c8c7f04009:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-28T00:25:09,156 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 2.665sec 2024-11-28T00:25:09,159 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-28T00:25:09,160 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-28T00:25:09,162 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-28T00:25:09,163 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-28T00:25:09,163 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-28T00:25:09,164 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,39673,1732753505055-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-28T00:25:09,165 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,39673,1732753505055-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-28T00:25:09,220 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-28T00:25:09,220 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] client.AsyncConnectionImpl(321): The fetched master address is 72c8c7f04009,39673,1732753505055 2024-11-28T00:25:09,223 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7370a8ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:09,227 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6006bb23 2024-11-28T00:25:09,230 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-28T00:25:09,230 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-28T00:25:09,230 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T00:25:09,233 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42323, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T00:25:09,238 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:25:09,241 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'hbase:acl', {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE 
=> '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-28T00:25:09,245 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:25:09,260 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:25:09,265 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:25:09,265 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=hbase:acl 2024-11-28T00:25:09,267 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:25:09,269 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78ac4767, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:09,269 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:25:09,269 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:25:09,270 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:09,271 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "hbase" qualifier: "acl" procId is: 4 2024-11-28T00:25:09,319 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:25:09,320 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:25:09,328 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:25:09,353 DEBUG [RPCClient-NioEventLoopGroup-6-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:09,360 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35244, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:25:09,373 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@129fe469, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:09,373 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta 
region location from registry 2024-11-28T00:25:09,384 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:09,385 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:09,396 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43568, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:09,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=72c8c7f04009,39673,1732753505055 2024-11-28T00:25:09,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2305): Starting mini mapreduce cluster... 2024-11-28T00:25:09,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/test.cache.data in system properties and HBase conf 2024-11-28T00:25:09,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.tmp.dir in system properties and HBase conf 2024-11-28T00:25:09,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir in system properties and HBase conf 2024-11-28T00:25:09,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-28T00:25:09,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-28T00:25:09,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-28T00:25:09,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-28T00:25:09,404 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-28T00:25:09,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-28T00:25:09,405 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T00:25:09,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-28T00:25:09,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-28T00:25:09,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741837_1013 (size=349) 2024-11-28T00:25:09,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-28T00:25:09,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T00:25:09,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-28T00:25:09,405 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/nfs.dump.dir in system properties and HBase conf 2024-11-28T00:25:09,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741837_1013 (size=349) 2024-11-28T00:25:09,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir in system properties and HBase conf 2024-11-28T00:25:09,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-28T00:25:09,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/dfs.provided.aliasmap.inmemory.leveldb.dir in 
system properties and HBase conf 2024-11-28T00:25:09,406 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-28T00:25:09,406 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741837_1013 (size=349) 2024-11-28T00:25:09,419 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 91195ca5c70c54af0f84245ffc432b72, NAME => 'hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:acl', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:09,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741838_1014 (size=36) 2024-11-28T00:25:09,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741838_1014 (size=36) 2024-11-28T00:25:09,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741838_1014 (size=36) 2024-11-28T00:25:09,469 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(898): Instantiated hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:09,469 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1722): Closing 91195ca5c70c54af0f84245ffc432b72, disabling compactions & flushes 2024-11-28T00:25:09,469 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1755): Closing region hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:25:09,469 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:25:09,469 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. after waiting 0 ms 2024-11-28T00:25:09,469 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:25:09,469 INFO [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1973): Closed hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 
2024-11-28T00:25:09,469 DEBUG [RegionOpenAndInit-hbase:acl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 91195ca5c70c54af0f84245ffc432b72: Waiting for close lock at 1732753509469Disabling compacts and flushes for region at 1732753509469Disabling writes for close at 1732753509469Writing region close event to WAL at 1732753509469Closed at 1732753509469 2024-11-28T00:25:09,473 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:25:09,480 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72.","families":{"info":[{"qualifier":"regioninfo","vlen":35,"tag":[],"timestamp":"1732753509474"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753509474"}]},"ts":"1732753509474"} 2024-11-28T00:25:09,482 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:25:09,491 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-28T00:25:09,494 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:25:09,498 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753509494"}]},"ts":"1732753509494"} 2024-11-28T00:25:09,506 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLING in hbase:meta 2024-11-28T00:25:09,506 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:25:09,510 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:25:09,510 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:25:09,510 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:25:09,510 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:25:09,510 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:25:09,510 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:25:09,510 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:25:09,511 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:25:09,511 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:25:09,511 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:25:09,513 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=91195ca5c70c54af0f84245ffc432b72, ASSIGN}] 2024-11-28T00:25:09,520 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:acl, 
region=91195ca5c70c54af0f84245ffc432b72, ASSIGN 2024-11-28T00:25:09,522 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=91195ca5c70c54af0f84245ffc432b72, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:25:09,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741839_1015 (size=592039) 2024-11-28T00:25:09,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741839_1015 (size=592039) 2024-11-28T00:25:09,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741839_1015 (size=592039) 2024-11-28T00:25:09,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741840_1016 (size=1663647) 2024-11-28T00:25:09,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741840_1016 (size=1663647) 2024-11-28T00:25:09,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741840_1016 (size=1663647) 2024-11-28T00:25:09,687 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-28T00:25:09,688 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=91195ca5c70c54af0f84245ffc432b72, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:25:09,692 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:25:09,694 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:acl, region=91195ca5c70c54af0f84245ffc432b72, ASSIGN because future has completed 2024-11-28T00:25:09,701 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91195ca5c70c54af0f84245ffc432b72, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:25:09,888 DEBUG [RSProcedureDispatcher-pool-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:25:09,911 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59805, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:25:09,946 INFO [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(132): Open hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 
2024-11-28T00:25:09,946 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 91195ca5c70c54af0f84245ffc432b72, NAME => 'hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:25:09,954 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. service=AccessControlService 2024-11-28T00:25:09,955 INFO [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:25:09,955 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table acl 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,955 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(898): Instantiated hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:09,956 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,956 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,964 INFO [StoreOpener-91195ca5c70c54af0f84245ffc432b72-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family l of region 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,978 INFO [StoreOpener-91195ca5c70c54af0f84245ffc432b72-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 91195ca5c70c54af0f84245ffc432b72 columnFamilyName l 2024-11-28T00:25:09,978 DEBUG [StoreOpener-91195ca5c70c54af0f84245ffc432b72-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:09,980 INFO [StoreOpener-91195ca5c70c54af0f84245ffc432b72-1 {}] regionserver.HStore(327): Store=91195ca5c70c54af0f84245ffc432b72/l, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:09,981 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,983 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,984 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,985 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,985 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:09,993 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:10,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:25:10,005 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:25:10,007 INFO [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1114): Opened 91195ca5c70c54af0f84245ffc432b72; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67277540, jitterRate=0.0025134682655334473}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:25:10,007 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:25:10,009 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 91195ca5c70c54af0f84245ffc432b72: Running coprocessor pre-open hook at 1732753509956Writing region info on filesystem at 1732753509956Initializing all the Stores at 1732753509960 (+4 ms)Instantiating store for column family {NAME => 'l', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'NONE', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732753509960Cleaning up temporary data from 
old regions at 1732753509985 (+25 ms)Running coprocessor post-open hooks at 1732753510007 (+22 ms)Region opened successfully at 1732753510009 (+2 ms) 2024-11-28T00:25:10,016 INFO [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., pid=6, masterSystemTime=1732753509888 2024-11-28T00:25:10,023 DEBUG [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:25:10,023 INFO [RS_OPEN_PRIORITY_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_PRIORITY_REGION, pid=6}] handler.AssignRegionHandler(153): Opened hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:25:10,024 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=91195ca5c70c54af0f84245ffc432b72, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:25:10,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 91195ca5c70c54af0f84245ffc432b72, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:25:10,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-28T00:25:10,043 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 91195ca5c70c54af0f84245ffc432b72, server=72c8c7f04009,41621,1732753506207 in 335 msec 2024-11-28T00:25:10,050 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-28T00:25:10,050 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:acl, region=91195ca5c70c54af0f84245ffc432b72, ASSIGN in 531 msec 2024-11-28T00:25:10,052 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:25:10,052 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"hbase:acl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753510052"}]},"ts":"1732753510052"} 2024-11-28T00:25:10,056 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=hbase:acl, state=ENABLED in hbase:meta 2024-11-28T00:25:10,058 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=hbase:acl execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:25:10,062 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=hbase:acl in 813 msec 2024-11-28T00:25:10,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-28T00:25:10,514 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: 
hbase:acl completed 2024-11-28T00:25:10,532 DEBUG [master/72c8c7f04009:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-28T00:25:10,534 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-28T00:25:10,534 INFO [master/72c8c7f04009:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=72c8c7f04009,39673,1732753505055-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-28T00:25:11,624 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:11,778 WARN [Thread-370 {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:12,068 INFO [Thread-370 {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:25:12,072 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-28T00:25:12,077 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:25:12,096 INFO [Thread-370 {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:25:12,096 INFO [Thread-370 {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:25:12,096 INFO [Thread-370 {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:25:12,100 INFO [Thread-370 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46180687{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:25:12,100 INFO [Thread-370 {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@126eaf28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-28T00:25:12,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:25:12,108 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:25:12,108 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:25:12,117 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:12,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@295a1b72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:25:12,134 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ca85fc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-28T00:25:12,323 INFO [Thread-370 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver as a provider class 2024-11-28T00:25:12,324 INFO [Thread-370 {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices as a root resource class 2024-11-28T00:25:12,324 INFO [Thread-370 {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-28T00:25:12,326 INFO [Thread-370 {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-28T00:25:12,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741831_1007 (size=1321) 2024-11-28T00:25:12,412 INFO [Thread-370 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:12,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741832_1008 (size=32) 2024-11-28T00:25:12,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741829_1005 (size=34) 2024-11-28T00:25:12,862 INFO [Thread-370 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:13,329 INFO [Thread-370 {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:13,362 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1548d9d4{cluster,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir/jetty-localhost-41727-hadoop-yarn-common-3_4_1_jar-_-any-11101364566300264884/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-28T00:25:13,363 INFO [Thread-370 {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@648e364d{jobhistory,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir/jetty-localhost-40305-hadoop-yarn-common-3_4_1_jar-_-any-6248137534272940888/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-28T00:25:13,363 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7ab73e22{HTTP/1.1, (http/1.1)}{localhost:41727} 2024-11-28T00:25:13,363 INFO [Time-limited test {}] server.Server(415): Started @16723ms 2024-11-28T00:25:13,364 INFO [Thread-370 {}] server.AbstractConnector(333): Started ServerConnector@40399fe0{HTTP/1.1, (http/1.1)}{localhost:40305} 2024-11-28T00:25:13,364 INFO [Thread-370 {}] server.Server(415): Started @16724ms 2024-11-28T00:25:13,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741841_1017 (size=5) 2024-11-28T00:25:13,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741841_1017 (size=5) 2024-11-28T00:25:13,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741841_1017 (size=5) 2024-11-28T00:25:14,384 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:25:14,523 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-28T00:25:14,526 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:acl' 2024-11-28T00:25:15,160 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-28T00:25:15,168 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:15,241 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-28T00:25:15,242 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:25:15,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:25:15,252 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:25:15,252 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:25:15,253 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:15,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77fc9ec2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:25:15,260 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6caa23cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-28T00:25:15,365 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-28T00:25:15,365 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-28T00:25:15,365 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-28T00:25:15,365 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-28T00:25:15,385 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:15,439 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:15,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:25:15,542 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionServerObservers 2024-11-28T00:25:15,543 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-28T00:25:15,543 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-28T00:25:15,547 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-28T00:25:15,547 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_acl Metrics about Tables on a single HBase RegionServer 2024-11-28T00:25:15,549 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:25:15,549 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): 
Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase MasterObservers 2024-11-28T00:25:15,549 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-28T00:25:15,549 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver Metrics about HBase MasterObservers 2024-11-28T00:25:15,550 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:25:15,550 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController Metrics about HBase RegionObservers 2024-11-28T00:25:15,550 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T00:25:15,550 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-28T00:25:15,550 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-28T00:25:15,550 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-28T00:25:15,616 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:15,632 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5b49ba0a{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir/jetty-localhost-45285-hadoop-yarn-common-3_4_1_jar-_-any-9176488716702333119/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-28T00:25:15,648 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3318f13{HTTP/1.1, (http/1.1)}{localhost:45285} 2024-11-28T00:25:15,648 INFO [Time-limited test {}] server.Server(415): Started @19008ms 2024-11-28T00:25:15,972 WARN [Time-limited test {}] tracker.NMLogAggregationStatusTracker(95): Log Aggregation is disabled.So is the LogAggregationStatusTracker. 2024-11-28T00:25:15,976 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:15,991 WARN [Time-limited test {}] servlet.GuiceFilter(102): Multiple Servlet injectors detected. 
This is a warning indicating that you have more than one GuiceFilter running in your web application. If this is deliberate, you may safely ignore this message. If this is NOT deliberate however, your application may not work as expected. 2024-11-28T00:25:15,992 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-28T00:25:15,999 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-28T00:25:16,000 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-28T00:25:16,000 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-28T00:25:16,001 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-28T00:25:16,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13781459{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,AVAILABLE} 2024-11-28T00:25:16,003 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d2a247c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,AVAILABLE} 2024-11-28T00:25:16,091 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(116): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices as a root resource class 2024-11-28T00:25:16,092 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.webapp.GenericExceptionHandler as a provider class 2024-11-28T00:25:16,092 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(113): Registering org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver as a provider class 2024-11-28T00:25:16,092 INFO [Time-limited test {}] application.WebApplicationImpl(815): Initiating Jersey application, version 'Jersey: 1.19.4 05/24/2017 03:20 PM' 2024-11-28T00:25:16,104 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.JAXBContextResolver to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:16,111 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.webapp.GenericExceptionHandler to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:16,247 INFO [Time-limited test {}] container.GuiceComponentProviderFactory(168): Binding org.apache.hadoop.yarn.server.nodemanager.webapp.NMWebServices to GuiceManagedComponentProvider with the scope "Singleton" 2024-11-28T00:25:16,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started 
o.e.j.w.WebAppContext@1a21ff78{node,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/java.io.tmpdir/jetty-localhost-43055-hadoop-yarn-common-3_4_1_jar-_-any-1173433921513019151/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-28T00:25:16,255 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e09654e{HTTP/1.1, (http/1.1)}{localhost:43055} 2024-11-28T00:25:16,255 INFO [Time-limited test {}] server.Server(415): Started @19615ms 2024-11-28T00:25:16,298 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2341): Mini mapreduce cluster started 2024-11-28T00:25:16,300 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [30,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:25:16,343 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=712, OpenFileDescriptor=755, MaxFileDescriptor=1048576, SystemLoadAverage=343, ProcessCount=11, AvailableMemoryMB=7495 2024-11-28T00:25:16,346 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=712 is superior to 500 2024-11-28T00:25:16,354 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-28T00:25:16,365 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 72c8c7f04009,39673,1732753505055 2024-11-28T00:25:16,365 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@4be24d23 2024-11-28T00:25:16,366 DEBUG [RPCClient-NioEventLoopGroup-6-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T00:25:16,370 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38988, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T00:25:16,372 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:25:16,377 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:16,380 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:25:16,382 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): 
Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSplitRegion" procId is: 7 2024-11-28T00:25:16,384 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:25:16,387 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:25:16,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741842_1018 (size=458) 2024-11-28T00:25:16,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741842_1018 (size=458) 2024-11-28T00:25:16,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741842_1018 (size=458) 2024-11-28T00:25:16,439 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7e74f876de2bcc196d763b50943e18f2, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:16,441 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 875b43592001846889c7b989a9e36642, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:16,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741843_1019 (size=83) 2024-11-28T00:25:16,491 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741843_1019 (size=83) 2024-11-28T00:25:16,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741843_1019 (size=83) 
2024-11-28T00:25:16,492 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:25:16,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:16,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 7e74f876de2bcc196d763b50943e18f2, disabling compactions & flushes 2024-11-28T00:25:16,493 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:16,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:16,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. after waiting 0 ms 2024-11-28T00:25:16,493 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:16,494 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 
2024-11-28T00:25:16,494 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7e74f876de2bcc196d763b50943e18f2: Waiting for close lock at 1732753516493Disabling compacts and flushes for region at 1732753516493Disabling writes for close at 1732753516493Writing region close event to WAL at 1732753516493Closed at 1732753516493 2024-11-28T00:25:16,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741844_1020 (size=83) 2024-11-28T00:25:16,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741844_1020 (size=83) 2024-11-28T00:25:16,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741844_1020 (size=83) 2024-11-28T00:25:16,513 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:16,513 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1722): Closing 875b43592001846889c7b989a9e36642, disabling compactions & flushes 2024-11-28T00:25:16,513 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:16,514 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:16,514 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. after waiting 0 ms 2024-11-28T00:25:16,514 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:16,514 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 
2024-11-28T00:25:16,514 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSplitRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for 875b43592001846889c7b989a9e36642: Waiting for close lock at 1732753516513Disabling compacts and flushes for region at 1732753516513Disabling writes for close at 1732753516514 (+1 ms)Writing region close event to WAL at 1732753516514Closed at 1732753516514 2024-11-28T00:25:16,517 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:25:16,518 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732753516517"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753516517"}]},"ts":"1732753516517"} 2024-11-28T00:25:16,518 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732753516517"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753516517"}]},"ts":"1732753516517"} 2024-11-28T00:25:16,573 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:25:16,576 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:25:16,576 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753516576"}]},"ts":"1732753516576"} 2024-11-28T00:25:16,580 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-28T00:25:16,580 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:25:16,583 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:25:16,583 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:25:16,583 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:25:16,583 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:25:16,583 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:25:16,583 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:25:16,583 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:25:16,583 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:25:16,583 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:25:16,583 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:25:16,583 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, ASSIGN}, {pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, ASSIGN}] 2024-11-28T00:25:16,586 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, ASSIGN 2024-11-28T00:25:16,586 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, ASSIGN 2024-11-28T00:25:16,588 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:25:16,588 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:25:16,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:25:16,739 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:25:16,739 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=7e74f876de2bcc196d763b50943e18f2, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:16,739 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=875b43592001846889c7b989a9e36642, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:25:16,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, ASSIGN because future has completed 2024-11-28T00:25:16,747 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7e74f876de2bcc196d763b50943e18f2, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:25:16,751 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, ASSIGN because future has completed 2024-11-28T00:25:16,761 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 875b43592001846889c7b989a9e36642, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:25:16,915 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:25:16,927 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:16,928 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7752): Opening region: {ENCODED => 7e74f876de2bcc196d763b50943e18f2, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:25:16,928 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. service=AccessControlService 2024-11-28T00:25:16,929 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:25:16,929 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,929 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:16,929 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7794): checking encryption for 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,929 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(7797): checking classloading for 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,933 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50767, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:25:16,933 INFO [StoreOpener-7e74f876de2bcc196d763b50943e18f2-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,940 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:16,940 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7752): Opening region: {ENCODED => 875b43592001846889c7b989a9e36642, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:25:16,940 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. service=AccessControlService 2024-11-28T00:25:16,941 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:25:16,941 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSplitRegion 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,941 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:16,941 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7794): checking encryption for 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,941 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(7797): checking classloading for 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,942 INFO [StoreOpener-7e74f876de2bcc196d763b50943e18f2-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7e74f876de2bcc196d763b50943e18f2 columnFamilyName cf 2024-11-28T00:25:16,943 INFO [StoreOpener-875b43592001846889c7b989a9e36642-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,946 INFO [StoreOpener-875b43592001846889c7b989a9e36642-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 875b43592001846889c7b989a9e36642 columnFamilyName cf 2024-11-28T00:25:16,949 DEBUG [StoreOpener-875b43592001846889c7b989a9e36642-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:16,949 DEBUG [StoreOpener-7e74f876de2bcc196d763b50943e18f2-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:16,949 INFO 
[StoreOpener-7e74f876de2bcc196d763b50943e18f2-1 {}] regionserver.HStore(327): Store=7e74f876de2bcc196d763b50943e18f2/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:16,949 INFO [StoreOpener-875b43592001846889c7b989a9e36642-1 {}] regionserver.HStore(327): Store=875b43592001846889c7b989a9e36642/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:16,950 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1038): replaying wal for 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,950 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1038): replaying wal for 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,951 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,952 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,952 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,952 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,953 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1048): stopping wal replay for 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,953 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1060): Cleaning up temporary data for 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,953 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1048): stopping wal replay for 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,953 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1060): Cleaning up temporary data for 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,955 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1093): writing seq id for 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,956 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1093): writing seq id for 
7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,959 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:25:16,960 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1114): Opened 875b43592001846889c7b989a9e36642; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59624701, jitterRate=-0.11152271926403046}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:25:16,960 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 875b43592001846889c7b989a9e36642 2024-11-28T00:25:16,961 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:25:16,961 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegion(1006): Region open journal for 875b43592001846889c7b989a9e36642: Running coprocessor pre-open hook at 1732753516941Writing region info on filesystem at 1732753516941Initializing all the Stores at 1732753516943 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753516943Cleaning up temporary data from old regions at 1732753516953 (+10 ms)Running coprocessor post-open hooks at 1732753516960 (+7 ms)Region opened successfully at 1732753516961 (+1 ms) 2024-11-28T00:25:16,962 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1114): Opened 7e74f876de2bcc196d763b50943e18f2; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67882045, jitterRate=0.011521294713020325}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:25:16,962 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:16,962 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegion(1006): Region open journal for 7e74f876de2bcc196d763b50943e18f2: Running coprocessor pre-open hook at 1732753516929Writing region info on filesystem at 1732753516930 (+1 ms)Initializing all the Stores at 1732753516931 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753516932 (+1 ms)Cleaning up temporary data from old regions at 1732753516953 (+21 ms)Running coprocessor post-open hooks at 1732753516962 (+9 ms)Region opened successfully at 1732753516962 2024-11-28T00:25:16,963 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642., pid=11, masterSystemTime=1732753516915 2024-11-28T00:25:16,964 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2., pid=10, masterSystemTime=1732753516903 2024-11-28T00:25:16,967 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:16,967 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=11}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:16,969 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=9 updating hbase:meta row=875b43592001846889c7b989a9e36642, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:25:16,971 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:16,971 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=10}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:16,972 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=7e74f876de2bcc196d763b50943e18f2, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:16,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=9, state=RUNNABLE, hasLock=false; OpenRegionProcedure 875b43592001846889c7b989a9e36642, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:25:16,977 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=72c8c7f04009,36427,1732753506332, table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-28T00:25:16,977 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=72c8c7f04009,43335,1732753506004, table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 2024-11-28T00:25:16,979 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=8, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7e74f876de2bcc196d763b50943e18f2, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:25:16,987 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=9 2024-11-28T00:25:16,987 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=9, state=SUCCESS, hasLock=false; OpenRegionProcedure 875b43592001846889c7b989a9e36642, server=72c8c7f04009,43335,1732753506004 in 218 msec 2024-11-28T00:25:16,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=8 2024-11-28T00:25:16,989 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=8, state=SUCCESS, hasLock=false; OpenRegionProcedure 7e74f876de2bcc196d763b50943e18f2, server=72c8c7f04009,36427,1732753506332 in 237 msec 2024-11-28T00:25:16,991 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, ASSIGN in 404 msec 2024-11-28T00:25:16,994 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-28T00:25:16,995 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, ASSIGN in 406 msec 2024-11-28T00:25:16,996 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:25:16,997 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753516996"}]},"ts":"1732753516996"} 2024-11-28T00:25:17,000 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-28T00:25:17,002 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=7, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:25:17,005 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-28T00:25:17,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:25:17,017 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:25:17,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:17,022 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43253, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:17,027 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:17,027 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:17,028 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:17,029 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50723, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-28T00:25:17,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:25:17,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:17,035 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54529, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-28T00:25:17,037 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:25:17,063 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:17,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:17,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:17,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:17,068 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:17,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:25:17,076 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:17,081 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:17,087 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 706 msec 2024-11-28T00:25:17,090 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:17,098 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:17,103 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:17,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-28T00:25:17,522 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:25:17,526 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:17,533 DEBUG [Time-limited test {}] 
hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:17,534 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:17,535 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:25:17,538 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:17,556 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:17,565 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:17,567 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38860, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:17,569 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:17,583 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:25:17,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753517584 (current time:1732753517584). 
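Note: the records above trace CreateTableProcedure pid=7 from CREATE_TABLE_WRITE_FS_LAYOUT through META updates, the two region ASSIGN procedures (pid=8, pid=9), and the ACL post-operation, until the client observes "Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed". For orientation only, a minimal client-side sketch that would drive the same flow is given here; the table name, the column family 'cf', and the single split key '1' are taken from the log, while the configuration, the single-version setting, and the class name are assumptions, not part of the test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Hypothetical sketch, not the test's own code.
public class CreateSplitTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSplitRegion");
      // Single column family 'cf' with one version, matching the descriptor logged
      // by HRegion(7572) above; other attributes are left at their defaults here.
      TableDescriptor desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)
              .build())
          .build();
      // Pre-split at '1' so the table starts with two regions, [,'1') and ['1',),
      // which is why two RegionOpenAndInit pool threads and two ASSIGN procedures
      // (pid=8, pid=9) appear in the log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc, splitKeys);
    }
  }
}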
2024-11-28T00:25:17,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:25:17,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-28T00:25:17,584 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:25:17,586 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16bf9b67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:17,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:25:17,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:25:17,588 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:25:17,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:25:17,588 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:25:17,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9afd393, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:17,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:25:17,589 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:25:17,590 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:17,591 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39016, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:25:17,593 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7a15b624, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:17,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:17,595 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:17,595 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:17,596 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:17,601 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:25:17,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:25:17,607 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:17,608 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:17,608 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:25:17,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e182c56, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:17,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:25:17,610 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:25:17,611 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:25:17,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:25:17,611 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:25:17,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a138da9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:17,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:25:17,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:25:17,612 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:17,613 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39026, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:25:17,614 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a3dd9a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:17,615 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:17,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:17,616 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:17,617 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49548, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:25:17,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:25:17,620 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:17,621 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58928, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:17,623 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:25:17,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:25:17,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:17,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:17,623 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:25:17,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:25:17,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:25:17,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:25:17,637 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-28T00:25:17,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-28T00:25:17,640 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:25:17,652 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:25:17,666 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:25:17,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741845_1021 (size=215) 2024-11-28T00:25:17,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741845_1021 (size=215) 2024-11-28T00:25:17,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741845_1021 (size=215) 2024-11-28T00:25:17,683 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=12, 
state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:25:17,686 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7e74f876de2bcc196d763b50943e18f2}, {pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 875b43592001846889c7b989a9e36642}] 2024-11-28T00:25:17,692 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:17,692 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 875b43592001846889c7b989a9e36642 2024-11-28T00:25:17,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-28T00:25:17,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=14 2024-11-28T00:25:17,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=13 2024-11-28T00:25:17,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:17,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:17,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.HRegion(2603): Flush status journal for 7e74f876de2bcc196d763b50943e18f2: 2024-11-28T00:25:17,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 875b43592001846889c7b989a9e36642: 2024-11-28T00:25:17,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:25:17,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. for emptySnaptb0-testExportFileSystemStateWithSplitRegion completed. 
2024-11-28T00:25:17,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:17,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:17,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:25:17,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:25:17,866 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:25:17,866 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:25:17,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741846_1022 (size=86) 2024-11-28T00:25:17,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741846_1022 (size=86) 2024-11-28T00:25:17,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741846_1022 (size=86) 2024-11-28T00:25:17,889 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:17,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741847_1023 (size=86) 2024-11-28T00:25:17,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741847_1023 (size=86) 2024-11-28T00:25:17,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741847_1023 (size=86) 2024-11-28T00:25:17,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-28T00:25:17,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 
2024-11-28T00:25:17,893 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=13}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=13 2024-11-28T00:25:17,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=13 2024-11-28T00:25:17,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-28T00:25:17,895 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:17,895 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSplitRegion on region 875b43592001846889c7b989a9e36642 2024-11-28T00:25:17,895 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=14, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 875b43592001846889c7b989a9e36642 2024-11-28T00:25:17,895 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=13, ppid=12, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:17,900 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 875b43592001846889c7b989a9e36642 in 211 msec 2024-11-28T00:25:17,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=12 2024-11-28T00:25:17,904 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=12, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7e74f876de2bcc196d763b50943e18f2 in 211 msec 2024-11-28T00:25:17,904 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:25:17,908 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:25:17,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:25:17,911 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:25:17,912 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:17,913 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:25:17,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741848_1024 (size=78) 2024-11-28T00:25:17,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741848_1024 (size=78) 2024-11-28T00:25:17,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741848_1024 (size=78) 2024-11-28T00:25:17,934 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:25:17,934 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:17,938 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:17,962 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-28T00:25:17,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741849_1025 (size=713) 2024-11-28T00:25:17,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741849_1025 (size=713) 2024-11-28T00:25:17,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741849_1025 (size=713) 2024-11-28T00:25:17,991 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:25:18,008 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:25:18,010 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:18,018 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=12, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:25:18,018 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 12 2024-11-28T00:25:18,022 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=12, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 386 msec 2024-11-28T00:25:18,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=12 2024-11-28T00:25:18,272 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:25:18,295 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:25:18,300 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:25:18,305 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:18,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:18,310 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 
2024-11-28T00:25:18,310 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:25:18,313 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:18,321 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:18,330 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSplitRegion,, stopping at row=testtb-testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:18,334 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:25:18,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753518334 (current time:1732753518334). 2024-11-28T00:25:18,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:25:18,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-28T00:25:18,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:25:18,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b8b594f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:18,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:25:18,335 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:25:18,336 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:25:18,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:25:18,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:25:18,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@385886fe, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:18,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:25:18,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:25:18,337 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:18,338 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39046, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:25:18,339 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e40614a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:18,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:18,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:18,341 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:18,342 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49564, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:18,344 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 
2024-11-28T00:25:18,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:25:18,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:18,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:18,344 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:25:18,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75434f2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:18,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:25:18,345 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:25:18,346 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:25:18,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:25:18,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:25:18,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@da3316a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:18,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:25:18,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:25:18,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:18,348 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39060, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:25:18,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74cd8adc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:18,350 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:18,351 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:18,352 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:18,353 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49566, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:18,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:25:18,355 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:18,356 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58936, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:18,358 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 
2024-11-28T00:25:18,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:25:18,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:18,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:18,359 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:25:18,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:25:18,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:25:18,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:25:18,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-28T00:25:18,363 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:25:18,363 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:25:18,365 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:25:18,369 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:25:18,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741850_1026 (size=210) 2024-11-28T00:25:18,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741850_1026 (size=210) 2024-11-28T00:25:18,384 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741850_1026 (size=210) 2024-11-28T00:25:18,385 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:25:18,386 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7e74f876de2bcc196d763b50943e18f2}, {pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 875b43592001846889c7b989a9e36642}] 2024-11-28T00:25:18,387 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:18,387 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 875b43592001846889c7b989a9e36642 2024-11-28T00:25:18,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:25:18,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=17 2024-11-28T00:25:18,540 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=16 2024-11-28T00:25:18,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:18,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:25:18,545 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2902): Flushing 7e74f876de2bcc196d763b50943e18f2 1/1 column families, dataSize=333 B heapSize=976 B 2024-11-28T00:25:18,546 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2902): Flushing 875b43592001846889c7b989a9e36642 1/1 column families, dataSize=2.93 KB heapSize=6.58 KB 2024-11-28T00:25:18,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128fd128a202d0742e9ace6f5266449fae6_875b43592001846889c7b989a9e36642 is 71, key is 105a6ed5168111916903fdc6b3cf8f33/cf:q/1732753518300/Put/seqid=0 2024-11-28T00:25:18,633 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ef7f67eb3f95433290fedc0436e3075b_7e74f876de2bcc196d763b50943e18f2 is 71, key is 0238f1cc98401bc2029c4c80ee87d574/cf:q/1732753518294/Put/seqid=0 2024-11-28T00:25:18,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741851_1027 (size=5242) 2024-11-28T00:25:18,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741852_1028 (size=8031) 2024-11-28T00:25:18,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741851_1027 (size=5242) 2024-11-28T00:25:18,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741851_1027 (size=5242) 2024-11-28T00:25:18,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39705 is added to blk_1073741852_1028 (size=8031) 2024-11-28T00:25:18,664 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741852_1028 (size=8031) 2024-11-28T00:25:18,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:18,670 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:18,682 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:25:18,738 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128fd128a202d0742e9ace6f5266449fae6_875b43592001846889c7b989a9e36642 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128fd128a202d0742e9ace6f5266449fae6_875b43592001846889c7b989a9e36642 2024-11-28T00:25:18,738 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ef7f67eb3f95433290fedc0436e3075b_7e74f876de2bcc196d763b50943e18f2 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241128ef7f67eb3f95433290fedc0436e3075b_7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:18,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/.tmp/cf/a3f5883589fd4ed8be2745857322dd92, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=875b43592001846889c7b989a9e36642] 2024-11-28T00:25:18,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/.tmp/cf/c9bd37999cd5409892cf834931c29052, store: [table=testtb-testExportFileSystemStateWithSplitRegion family=cf region=7e74f876de2bcc196d763b50943e18f2] 2024-11-28T00:25:18,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] hfile.HFileWriterImpl(814): Len of the biggest 
cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/.tmp/cf/a3f5883589fd4ed8be2745857322dd92 is 224, key is 1426471478c6c9f7775c642688c576eec/cf:q/1732753518300/Put/seqid=0 2024-11-28T00:25:18,824 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/.tmp/cf/c9bd37999cd5409892cf834931c29052 is 224, key is 0dca6457e5591a5789ec6f9a832d94701/cf:q/1732753518294/Put/seqid=0 2024-11-28T00:25:18,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741854_1030 (size=6418) 2024-11-28T00:25:18,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741854_1030 (size=6418) 2024-11-28T00:25:18,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741854_1030 (size=6418) 2024-11-28T00:25:18,862 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=333, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/.tmp/cf/c9bd37999cd5409892cf834931c29052 2024-11-28T00:25:18,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741853_1029 (size=15277) 2024-11-28T00:25:18,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741853_1029 (size=15277) 2024-11-28T00:25:18,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741853_1029 (size=15277) 2024-11-28T00:25:18,881 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=2.9 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/.tmp/cf/a3f5883589fd4ed8be2745857322dd92 2024-11-28T00:25:18,896 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/.tmp/cf/a3f5883589fd4ed8be2745857322dd92 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/cf/a3f5883589fd4ed8be2745857322dd92 2024-11-28T00:25:18,898 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/.tmp/cf/c9bd37999cd5409892cf834931c29052 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/cf/c9bd37999cd5409892cf834931c29052 2024-11-28T00:25:18,908 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/cf/a3f5883589fd4ed8be2745857322dd92, entries=45, sequenceid=6, filesize=14.9 K 2024-11-28T00:25:18,911 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/cf/c9bd37999cd5409892cf834931c29052, entries=5, sequenceid=6, filesize=6.3 K 2024-11-28T00:25:18,919 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(3140): Finished flush of dataSize ~2.93 KB/3003, heapSize ~6.56 KB/6720, currentSize=0 B/0 for 875b43592001846889c7b989a9e36642 in 371ms, sequenceid=6, compaction requested=false 2024-11-28T00:25:18,919 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(3140): Finished flush of dataSize ~333 B/333, heapSize ~960 B/960, currentSize=0 B/0 for 7e74f876de2bcc196d763b50943e18f2 in 372ms, sequenceid=6, compaction requested=false 2024-11-28T00:25:18,919 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-11-28T00:25:18,919 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSplitRegion' 2024-11-28T00:25:18,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.HRegion(2603): Flush status journal for 7e74f876de2bcc196d763b50943e18f2: 2024-11-28T00:25:18,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.HRegion(2603): Flush status journal for 875b43592001846889c7b989a9e36642: 2024-11-28T00:25:18,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:25:18,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 
for snaptb0-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:25:18,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:18,920 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:18,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:25:18,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:25:18,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/cf/c9bd37999cd5409892cf834931c29052] hfiles 2024-11-28T00:25:18,921 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/cf/a3f5883589fd4ed8be2745857322dd92] hfiles 2024-11-28T00:25:18,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/cf/c9bd37999cd5409892cf834931c29052 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:18,924 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/cf/a3f5883589fd4ed8be2745857322dd92 for snapshot=snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:18,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:25:18,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741856_1032 (size=125) 2024-11-28T00:25:18,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741856_1032 (size=125) 2024-11-28T00:25:18,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to 
blk_1073741856_1032 (size=125) 2024-11-28T00:25:18,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:25:18,997 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=17}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=17 2024-11-28T00:25:18,998 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=17 2024-11-28T00:25:18,998 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 875b43592001846889c7b989a9e36642 2024-11-28T00:25:18,999 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=17, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 875b43592001846889c7b989a9e36642 2024-11-28T00:25:19,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=17, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 875b43592001846889c7b989a9e36642 in 617 msec 2024-11-28T00:25:19,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741855_1031 (size=125) 2024-11-28T00:25:19,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741855_1031 (size=125) 2024-11-28T00:25:19,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741855_1031 (size=125) 2024-11-28T00:25:19,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 
2024-11-28T00:25:19,019 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=16}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=16 2024-11-28T00:25:19,020 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=16 2024-11-28T00:25:19,020 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSplitRegion on region 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:19,021 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=16, ppid=15, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:19,027 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=16, resume processing ppid=15 2024-11-28T00:25:19,028 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=16, ppid=15, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 7e74f876de2bcc196d763b50943e18f2 in 638 msec 2024-11-28T00:25:19,028 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:25:19,029 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:25:19,033 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:25:19,033 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:25:19,033 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:19,045 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128fd128a202d0742e9ace6f5266449fae6_875b43592001846889c7b989a9e36642, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241128ef7f67eb3f95433290fedc0436e3075b_7e74f876de2bcc196d763b50943e18f2] hfiles 2024-11-28T00:25:19,045 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128fd128a202d0742e9ace6f5266449fae6_875b43592001846889c7b989a9e36642 2024-11-28T00:25:19,045 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241128ef7f67eb3f95433290fedc0436e3075b_7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:25:19,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741857_1033 (size=309) 2024-11-28T00:25:19,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741857_1033 (size=309) 2024-11-28T00:25:19,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741857_1033 (size=309) 2024-11-28T00:25:19,092 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:25:19,093 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:19,095 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:19,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741858_1034 (size=1023) 2024-11-28T00:25:19,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45909 is added to blk_1073741858_1034 (size=1023) 2024-11-28T00:25:19,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741858_1034 (size=1023) 2024-11-28T00:25:19,185 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:25:19,199 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:25:19,199 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSplitRegion to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:19,203 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=15, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:25:19,203 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 15 2024-11-28T00:25:19,206 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=15, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=15, snapshot={ ss=snaptb0-testExportFileSystemStateWithSplitRegion table=testtb-testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 843 msec 2024-11-28T00:25:19,501 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=15 2024-11-28T00:25:19,502 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:25:19,524 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:25:19,525 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:25:19,526 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:25:19,527 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49574, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:25:19,527 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58940, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:25:19,528 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41621 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-28T00:25:19,529 INFO [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-28T00:25:19,529 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:25:19,529 INFO [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.CompactSplit(323): Interrupting running compactions because user switched off compactions 2024-11-28T00:25:19,532 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportFileSystemStateWithSplitRegion', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:25:19,534 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:19,535 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:25:19,535 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:19,536 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportFileSystemStateWithSplitRegion" procId is: 18 2024-11-28T00:25:19,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:25:19,537 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:25:19,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741859_1035 (size=390) 2024-11-28T00:25:19,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741859_1035 (size=390) 2024-11-28T00:25:19,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741859_1035 (size=390) 2024-11-28T00:25:19,557 INFO 
[RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0fd1f594fbf245ea623b9011d451f99d, NAME => 'testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='testExportFileSystemStateWithSplitRegion', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:19,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741860_1036 (size=75) 2024-11-28T00:25:19,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741860_1036 (size=75) 2024-11-28T00:25:19,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741860_1036 (size=75) 2024-11-28T00:25:19,569 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:19,570 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1722): Closing 0fd1f594fbf245ea623b9011d451f99d, disabling compactions & flushes 2024-11-28T00:25:19,570 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 2024-11-28T00:25:19,570 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 2024-11-28T00:25:19,570 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. after waiting 0 ms 2024-11-28T00:25:19,570 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 2024-11-28T00:25:19,570 INFO [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 
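The CreateTableProcedure above materializes a single-family table whose descriptor matches the attributes printed in the log (one 'cf' family, VERSIONS => '1', default store file tracker). A client-side request for an equivalent table could look like the hedged sketch below; it is illustrative only, assumes an existing Admin handle named admin, and is not the code that produced this log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

// Build a descriptor equivalent to the one logged by HMaster$4(2454) above:
// a single 'cf' family keeping one version, everything else left at defaults.
TableDescriptor desc = TableDescriptorBuilder
    .newBuilder(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))
    .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
        .setMaxVersions(1)
        .build())
    .build();
admin.createTable(desc);  // drives a CreateTableProcedure such as pid=18 on the master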
2024-11-28T00:25:19,570 DEBUG [RegionOpenAndInit-testExportFileSystemStateWithSplitRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0fd1f594fbf245ea623b9011d451f99d: Waiting for close lock at 1732753519569Disabling compacts and flushes for region at 1732753519569Disabling writes for close at 1732753519570 (+1 ms)Writing region close event to WAL at 1732753519570Closed at 1732753519570 2024-11-28T00:25:19,572 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:25:19,572 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732753519572"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753519572"}]},"ts":"1732753519572"} 2024-11-28T00:25:19,576 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-28T00:25:19,577 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:25:19,577 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753519577"}]},"ts":"1732753519577"} 2024-11-28T00:25:19,580 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLING in hbase:meta 2024-11-28T00:25:19,580 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:25:19,582 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:25:19,582 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:25:19,582 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:25:19,582 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:25:19,582 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:25:19,582 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:25:19,582 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:25:19,582 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:25:19,582 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:25:19,582 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:25:19,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, ASSIGN}] 2024-11-28T00:25:19,584 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, ASSIGN 2024-11-28T00:25:19,588 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:25:19,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:25:19,739 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-28T00:25:19,740 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=0fd1f594fbf245ea623b9011d451f99d, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:19,745 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=19, ppid=18, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, ASSIGN because future has completed 2024-11-28T00:25:19,761 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0fd1f594fbf245ea623b9011d451f99d, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:25:19,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:25:19,920 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 2024-11-28T00:25:19,920 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7752): Opening region: {ENCODED => 0fd1f594fbf245ea623b9011d451f99d, NAME => 'testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:25:19,921 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. service=AccessControlService 2024-11-28T00:25:19,921 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
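While the assignment above is in flight, the client that issued the create-table request keeps polling the master ("Checking to see if procedure is done pid=18"). In application code that polling is usually hidden behind the synchronous Admin call, but a test can also wait explicitly; the lines below are a generic illustration (only the table name is taken from the log, the admin handle is assumed), not the actual test code.

import org.apache.hadoop.hbase.TableName;

TableName tn = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
// Block until every region of the new table has been assigned and opened.
while (!admin.isTableAvailable(tn)) {
  Thread.sleep(100);
}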
2024-11-28T00:25:19,921 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,921 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:19,922 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7794): checking encryption for 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,922 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(7797): checking classloading for 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,924 INFO [StoreOpener-0fd1f594fbf245ea623b9011d451f99d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,926 INFO [StoreOpener-0fd1f594fbf245ea623b9011d451f99d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0fd1f594fbf245ea623b9011d451f99d columnFamilyName cf 2024-11-28T00:25:19,926 DEBUG [StoreOpener-0fd1f594fbf245ea623b9011d451f99d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:19,927 INFO [StoreOpener-0fd1f594fbf245ea623b9011d451f99d-1 {}] regionserver.HStore(327): Store=0fd1f594fbf245ea623b9011d451f99d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:19,927 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1038): replaying wal for 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,928 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,929 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,929 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1048): stopping wal replay for 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,929 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1060): Cleaning up temporary data for 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,931 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1093): writing seq id for 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,934 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:25:19,935 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1114): Opened 0fd1f594fbf245ea623b9011d451f99d; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59184956, jitterRate=-0.118075430393219}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:25:19,935 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:19,935 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegion(1006): Region open journal for 0fd1f594fbf245ea623b9011d451f99d: Running coprocessor pre-open hook at 1732753519922Writing region info on filesystem at 1732753519922Initializing all the Stores at 1732753519923 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753519923Cleaning up temporary data from old regions at 1732753519929 (+6 ms)Running coprocessor post-open hooks at 1732753519935 (+6 ms)Region opened successfully at 1732753519935 2024-11-28T00:25:19,937 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d., pid=20, masterSystemTime=1732753519915 2024-11-28T00:25:19,940 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 2024-11-28T00:25:19,940 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=20}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 
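Once the region is open, its location (72c8c7f04009,36427 in this run) is published in hbase:meta and can be read back by any client. The following sketch shows one way to inspect that assignment with a RegionLocator; it is illustrative only and assumes an open Connection named conn.

import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.RegionLocator;

try (RegionLocator locator =
         conn.getRegionLocator(TableName.valueOf("testExportFileSystemStateWithSplitRegion"))) {
  for (HRegionLocation loc : locator.getAllRegionLocations()) {
    // Print the encoded region name and the region server hosting it,
    // mirroring the regionLocation recorded in hbase:meta above.
    System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
  }
}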
2024-11-28T00:25:19,942 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=19 updating hbase:meta row=0fd1f594fbf245ea623b9011d451f99d, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:19,945 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=20, ppid=19, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0fd1f594fbf245ea623b9011d451f99d, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:25:19,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=20, resume processing ppid=19 2024-11-28T00:25:19,950 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=20, ppid=19, state=SUCCESS, hasLock=false; OpenRegionProcedure 0fd1f594fbf245ea623b9011d451f99d, server=72c8c7f04009,36427,1732753506332 in 187 msec 2024-11-28T00:25:19,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=19, resume processing ppid=18 2024-11-28T00:25:19,954 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=19, ppid=18, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, ASSIGN in 368 msec 2024-11-28T00:25:19,955 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:25:19,955 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753519955"}]},"ts":"1732753519955"} 2024-11-28T00:25:19,957 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=ENABLED in hbase:meta 2024-11-28T00:25:19,959 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=18, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:25:19,959 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testExportFileSystemStateWithSplitRegion jenkins: RWXCA 2024-11-28T00:25:19,963 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:25:19,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:19,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:19,966 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:19,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, 
quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:25:19,968 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:19,969 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:19,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:19,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:19,971 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:19,972 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:19,972 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:19,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=18, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportFileSystemStateWithSplitRegion in 437 msec 2024-11-28T00:25:19,972 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF\x0AN\x0A\x07jenkins\x12C\x08\x03"?\x0A3\x0A\x07default\x12(testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:19,973 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:25:20,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=18 2024-11-28T00:25:20,161 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 
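The remainder of the log covers writing a roughly 300 MB HFile under .../output/cf/test_file and bulk loading it into the new table through SecureBulkLoadManager, which is why BulkLoadHFilesTool later warns that the file "can be problematic as it may lead to oversplitting". A client-side bulk load of a prepared HFile directory is typically driven as in the sketch below; this is an illustration under assumed paths and configuration, not the MapReduce test harness that produced this log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

Configuration conf = HBaseConfiguration.create();
// The directory must contain one subdirectory per column family (here: output/cf/).
Path hfileDir = new Path("hdfs://localhost:43901/path/to/output");  // assumed path, not the logged one
BulkLoadHFiles.create(conf)
    .bulkLoad(TableName.valueOf("testExportFileSystemStateWithSplitRegion"), hfileDir);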
2024-11-28T00:25:20,162 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:20,165 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-28T00:25:20,526 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportFileSystemStateWithSplitRegion' 2024-11-28T00:25:22,438 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:25:23,977 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741861_1037 (size=134217728) 2024-11-28T00:25:23,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741861_1037 (size=134217728) 2024-11-28T00:25:23,978 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741861_1037 (size=134217728) 2024-11-28T00:25:25,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:25,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-28T00:25:25,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:25,542 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion Metrics about Tables on a single HBase RegionServer 2024-11-28T00:25:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741862_1038 (size=134217728) 2024-11-28T00:25:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741862_1038 (size=134217728) 2024-11-28T00:25:26,416 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741862_1038 (size=134217728) 2024-11-28T00:25:27,251 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/output/cf/test_file is 35, key is 1\x00\x00\x00/cf:q/1732753520170/Put/seqid=0 2024-11-28T00:25:27,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741863_1039 (size=51979256) 2024-11-28T00:25:27,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741863_1039 
(size=51979256) 2024-11-28T00:25:27,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741863_1039 (size=51979256) 2024-11-28T00:25:27,276 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@67f49214, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:27,277 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:25:27,277 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:25:27,280 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:25:27,280 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:25:27,281 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:25:27,281 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e91c26c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:27,281 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:25:27,282 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:25:27,284 DEBUG [RPCClient-NioEventLoopGroup-6-9 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:27,286 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33456, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:25:27,287 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6d746a97, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:27,288 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:27,290 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:27,290 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:27,293 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33820, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:27,313 WARN [Time-limited test {}] tool.BulkLoadHFilesTool$1(330): Trying to bulk load hfile 
hdfs://localhost:43901/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-11-28T00:25:27,313 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-28T00:25:27,315 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.AsyncConnectionImpl(321): The fetched master address is 72c8c7f04009,39673,1732753505055 2024-11-28T00:25:27,315 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@7256331 2024-11-28T00:25:27,315 DEBUG [RPCClient-NioEventLoopGroup-6-10 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-28T00:25:27,322 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33458, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-28T00:25:27,332 WARN [IPC Server handler 4 on default port 43901 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-28T00:25:27,339 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d., hostname=72c8c7f04009,36427,1732753506332, seqNum=2] 2024-11-28T00:25:27,351 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:27,412 INFO [BulkLoadHFilesTool-0 {}] tool.BulkLoadHFilesTool(704): Trying to load hfile=hdfs://localhost:43901/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/output/cf/test_file first=Optional[1\x00\x00\x00] last=Optional[9\x00\x00\x00] 2024-11-28T00:25:27,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:27,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:27,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:27,456 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60739, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=ClientService 2024-11-28T00:25:27,458 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36427 {}] ipc.MetricsHBaseServer(152): Unknown exception type org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-28T00:25:27,464 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36427 {}] ipc.CallRunner(138): callId: 187 service: ClientService methodName: ExecService size: 101 connection: 172.17.0.2:60739 deadline: 1732753587456, exception=org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 2024-11-28T00:25:27,466 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.SecureBulkLoadManager(227): unable to add token java.util.concurrent.ExecutionException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at java.util.concurrent.CompletableFuture.reportGet(CompletableFuture.java:396) ~[?:?] at java.util.concurrent.CompletableFuture.get(CompletableFuture.java:2073) ~[?:?] at org.apache.hadoop.hbase.regionserver.SecureBulkLoadManager.secureBulkLoadHFiles(SecureBulkLoadManager.java:221) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.bulkLoadHFile(RSRpcServices.java:2347) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43510) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] Caused by: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.instantiateException(RemoteWithExtrasException.java:110) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RemoteWithExtrasException.unwrapRemoteException(RemoteWithExtrasException.java:100) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.translateException(ConnectionUtils.java:219) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.onError(AsyncRpcRetryingCaller.java:165) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$call$4(AsyncSingleRequestRpcRetryingCaller.java:86) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture$UniWhenComplete.tryFire(CompletableFuture.java:841) ~[?:?] at java.util.concurrent.CompletableFuture.postComplete(CompletableFuture.java:510) ~[?:?] at java.util.concurrent.CompletableFuture.completeExceptionally(CompletableFuture.java:2162) ~[?:?] 
at org.apache.hadoop.hbase.client.RegionCoprocessorRpcChannelImpl.lambda$rpcCall$0(RegionCoprocessorRpcChannelImpl.java:90) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:56) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hbase.thirdparty.com.google.protobuf.RpcUtil$1.run(RpcUtil.java:47) ~[hbase-shaded-protobuf-4.1.9.jar:4.1.9] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:397) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.RemoteWithExtrasException: org.apache.hadoop.hbase.exceptions.UnknownProtocolException: No registered coprocessor service found for AuthenticationService in region hbase:meta,,1 at org.apache.hadoop.hbase.regionserver.HRegion.execService(HRegion.java:8304) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execServiceOnRegion(RSRpcServices.java:2441) at org.apache.hadoop.hbase.regionserver.RSRpcServices.execService(RSRpcServices.java:2415) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43516) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:392) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcConnection.readResponse(RpcConnection.java:457) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at 
org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.readResponse(NettyRpcDuplexHandler.java:125) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.NettyRpcDuplexHandler.channelRead(NettyRpcDuplexHandler.java:140) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.fireChannelRead(ByteToMessageDecoder.java:346) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.codec.ByteToMessageDecoder.channelRead(ByteToMessageDecoder.java:318) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:444) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.handler.timeout.IdleStateHandler.channelRead(IdleStateHandler.java:289) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:442) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.fireChannelRead(AbstractChannelHandlerContext.java:412) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline$HeadContext.channelRead(DefaultChannelPipeline.java:1357) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:440) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannelHandlerContext.invokeChannelRead(AbstractChannelHandlerContext.java:420) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.DefaultChannelPipeline.fireChannelRead(DefaultChannelPipeline.java:868) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.AbstractEpollStreamChannel$EpollStreamUnsafe.epollInReady(AbstractEpollStreamChannel.java:799) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.processReady(EpollEventLoop.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:399) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T00:25:27,477 WARN [IPC Server handler 1 on default port 43901 {}] namenode.FSNamesystem(6314): trying to get DT with no secret manager running 2024-11-28T00:25:27,524 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HStore(614): Validating hfile at hdfs://localhost:43901/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/output/cf/test_file for inclusion in 0fd1f594fbf245ea623b9011d451f99d/cf 2024-11-28T00:25:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HStore(626): HFile bounds: first=1\x00\x00\x00 last=9\x00\x00\x00 2024-11-28T00:25:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HStore(628): Region bounds: first= last= 2024-11-28T00:25:27,537 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HStore(641): Trying to bulk load hfile hdfs://localhost:43901/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/output/cf/test_file with size: 320414712 bytes can be problematic as it may lead to oversplitting. 2024-11-28T00:25:27,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegion(2603): Flush status journal for 0fd1f594fbf245ea623b9011d451f99d: 2024-11-28T00:25:27,539 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(397): Moving hdfs://localhost:43901/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/output/cf/test_file to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/staging/jenkins__testExportFileSystemStateWithSplitRegion__pl3ngsqd5i123k6agis7hd61onjjubgabjotq3msi733ifanag3pj564ooo3d2jm/cf/test_file 2024-11-28T00:25:27,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/staging/jenkins__testExportFileSystemStateWithSplitRegion__pl3ngsqd5i123k6agis7hd61onjjubgabjotq3msi733ifanag3pj564ooo3d2jm/cf/test_file as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_ 2024-11-28T00:25:27,547 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HStore(700): Loaded HFile hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/staging/jenkins__testExportFileSystemStateWithSplitRegion__pl3ngsqd5i123k6agis7hd61onjjubgabjotq3msi733ifanag3pj564ooo3d2jm/cf/test_file into 0fd1f594fbf245ea623b9011d451f99d/cf as 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_ - updating store file list. 2024-11-28T00:25:27,562 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 56258cb69c564f4db0d329bb9cb9d267_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-28T00:25:27,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HStore(722): Loaded HFile hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_ into 0fd1f594fbf245ea623b9011d451f99d/cf 2024-11-28T00:25:27,563 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HStore(706): Successfully loaded hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/staging/jenkins__testExportFileSystemStateWithSplitRegion__pl3ngsqd5i123k6agis7hd61onjjubgabjotq3msi733ifanag3pj564ooo3d2jm/cf/test_file into 0fd1f594fbf245ea623b9011d451f99d/cf (new location: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_) 2024-11-28T00:25:27,565 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.SecureBulkLoadManager$SecureBulkLoadListener(412): Bulk Load done for: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/staging/jenkins__testExportFileSystemStateWithSplitRegion__pl3ngsqd5i123k6agis7hd61onjjubgabjotq3msi733ifanag3pj564ooo3d2jm/cf/test_file 2024-11-28T00:25:27,577 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
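The bulk load recorded above (HStore validating test_file, SecureBulkLoadManager moving it into staging and committing it as 56258cb69c564f4db0d329bb9cb9d267_SeqId_4_) is driven from the client side by the bulk-load tool. A minimal sketch of such a client call, assuming the public BulkLoadHFiles API and a placeholder output directory rather than the test's actual HDFS paths:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.tool.BulkLoadHFiles;

    public class BulkLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        // Placeholder: a directory with one sub-directory per column family (here "cf")
        // containing the generated HFiles, like the .../output directory in the log.
        Path hfileOutputDir = new Path("/tmp/bulkload-output");
        // Moves each HFile into the region's cf directory and registers it with the store,
        // which is what the HStore/SecureBulkLoadManager entries above show on the server side.
        BulkLoadHFiles.create(conf).bulkLoad(table, hfileOutputDir);
      }
    }

The test itself goes through BulkLoadHFilesTool, as the call stack in the next entry shows.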
2024-11-28T00:25:27,577 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.bulkLoad(BulkLoadHFilesTool.java:1125) at org.apache.hadoop.hbase.tool.BulkLoadHFilesTool.run(BulkLoadHFilesTool.java:1176) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemStateWithSplitRegion(TestExportSnapshot.java:229) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:25:27,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:27,578 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:27,578 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
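The call stack above is the async connection being closed at the end of BulkLoadHFilesTool.bulkLoad; the "Stopping rpc client" lines are the direct consequence. A minimal sketch of that connection lifecycle, assuming only the public ConnectionFactory API:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // createAsyncConnection returns a CompletableFuture; closing the connection at the
        // end of the try block stops its rpc client, as logged above.
        try (AsyncConnection conn = ConnectionFactory.createAsyncConnection(conf).get()) {
          System.out.println(conn.getAdmin().listTableNames().get());
        }
      }
    }

Once the client is stopped, any request still being retried fails with StoppedRpcClientException, which is exactly what the region locator reports a few entries below.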
2024-11-28T00:25:27,578 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:27,579 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(64): Try updating region=testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d., hostname=72c8c7f04009,36427,1732753506332, seqNum=2 , the old value is region=testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d., hostname=72c8c7f04009,36427,1732753506332, seqNum=2, error=org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Call to address=72c8c7f04009:36427 failed on local exception: org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-28T00:25:27,580 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d., hostname=72c8c7f04009,36427,1732753506332, seqNum=2 is org.apache.hadoop.hbase.exceptions.ConnectionClosedException: Connection closed 2024-11-28T00:25:27,580 DEBUG [RPCClient-NioEventLoopGroup-6-11 {}] client.AsyncRegionLocatorHelper(88): Try removing region=testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d., hostname=72c8c7f04009,36427,1732753506332, seqNum=2 from cache 2024-11-28T00:25:27,584 WARN [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] util.NettyFutureUtils(65): IO operation failed org.apache.hbase.thirdparty.io.netty.channel.StacklessClosedChannelException: null at org.apache.hbase.thirdparty.io.netty.channel.AbstractChannel$AbstractUnsafe.write(Object, ChannelPromise)(Unknown Source) ~[hbase-shaded-netty-4.1.9.jar:?] 2024-11-28T00:25:27,588 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testExportFileSystemStateWithSplitRegion', row='5', locateType=CURRENT is [region=testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d., hostname=72c8c7f04009,36427,1732753506332, seqNum=2] 2024-11-28T00:25:27,599 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$3(2313): Client=jenkins//172.17.0.2 split testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 
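The last entry above is the master receiving the split request for testExportFileSystemStateWithSplitRegion. A client issues such a request through the Admin API; a minimal sketch, assuming the split key "5" (which matches the daughter boundary logged during the region opens below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SplitRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Asks the master to split the table's region at row key "5". The master then
          // schedules the SplitTableRegionProcedure seen as pid=21 below.
          admin.split(table, Bytes.toBytes("5"));
        }
      }
    }

The call returns once the request is accepted; completion is tracked by the repeated "Checking to see if procedure is done pid=21" entries that follow.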
2024-11-28T00:25:27,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:27,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0fd1f594fbf245ea623b9011d451f99d, daughterA=2301728e3e74ca9b7c47c3329302d12d, daughterB=83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:27,615 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0fd1f594fbf245ea623b9011d451f99d, daughterA=2301728e3e74ca9b7c47c3329302d12d, daughterB=83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:27,615 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0fd1f594fbf245ea623b9011d451f99d, daughterA=2301728e3e74ca9b7c47c3329302d12d, daughterB=83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:27,615 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=21, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0fd1f594fbf245ea623b9011d451f99d, daughterA=2301728e3e74ca9b7c47c3329302d12d, daughterB=83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:27,618 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:25:27,623 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, UNASSIGN}] 2024-11-28T00:25:27,625 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, UNASSIGN 2024-11-28T00:25:27,628 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=0fd1f594fbf245ea623b9011d451f99d, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:27,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=22, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, UNASSIGN because future has completed 2024-11-28T00:25:27,633 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-28T00:25:27,633 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0fd1f594fbf245ea623b9011d451f99d, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:25:27,692 WARN 
[Async-Client-Retry-Timer-pool-0 {}] client.AsyncNonMetaRegionLocator(265): Failed to locate region in 'testExportFileSystemStateWithSplitRegion', row='', locateType=CURRENT org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=72c8c7f04009:36427 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$RpcChannelImplementation.callMethod(AbstractRpcClient.java:628) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$Stub.scan(ClientProtos.java:43851) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.callOpenScanner(AsyncClientScanner.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.call(AsyncSingleRequestRpcRetryingCaller.java:84) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.lambda$doCall$7(AsyncSingleRequestRpcRetryingCaller.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.FutureUtils.lambda$addListener$0(FutureUtils.java:71) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at java.util.concurrent.CompletableFuture.uniWhenComplete(CompletableFuture.java:863) ~[?:?] at java.util.concurrent.CompletableFuture.uniWhenCompleteStage(CompletableFuture.java:887) ~[?:?] at java.util.concurrent.CompletableFuture.whenComplete(CompletableFuture.java:2325) ~[?:?] 
at org.apache.hadoop.hbase.util.FutureUtils.addListener(FutureUtils.java:64) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:108) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.call(AsyncRpcRetryingCaller.java:222) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCallerFactory$SingleRequestCallerBuilder.call(AsyncRpcRetryingCallerFactory.java:177) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:242) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.ConnectionUtils.timelineConsistentRead(ConnectionUtils.java:442) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.openScanner(AsyncClientScanner.java:255) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncClientScanner.start(AsyncClientScanner.java:275) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:617) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.RawAsyncTableImpl.scan(RawAsyncTableImpl.java:91) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.locateInMeta(AsyncNonMetaRegionLocator.java:408) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocationsInternal(AsyncNonMetaRegionLocator.java:516) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncNonMetaRegionLocator.getRegionLocations(AsyncNonMetaRegionLocator.java:529) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.lambda$getRegionLocation$7(AsyncRegionLocator.java:164) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.tracedLocationFuture(AsyncRegionLocator.java:106) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:158) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRegionLocator.getRegionLocation(AsyncRegionLocator.java:193) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncSingleRequestRpcRetryingCaller.doCall(AsyncSingleRequestRpcRetryingCaller.java:109) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.client.AsyncRpcRetryingCaller.lambda$tryScheduleRetry$1(AsyncRpcRetryingCaller.java:139) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.run(HashedWheelTimer.java:713) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.concurrent.ImmediateExecutor.execute(ImmediateExecutor.java:34) ~[hbase-shaded-netty-4.1.9.jar:?] 
at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:701) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:788) ~[hbase-shaded-netty-4.1.9.jar:?] at org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:501) ~[hbase-shaded-netty-4.1.9.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 34 more 2024-11-28T00:25:27,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:25:27,795 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(122): Close 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:27,795 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-28T00:25:27,796 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1722): Closing 0fd1f594fbf245ea623b9011d451f99d, disabling compactions & flushes 2024-11-28T00:25:27,796 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 2024-11-28T00:25:27,796 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 2024-11-28T00:25:27,796 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. after waiting 0 ms 2024-11-28T00:25:27,796 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 
2024-11-28T00:25:27,819 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=1 2024-11-28T00:25:27,824 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:25:27,824 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d. 2024-11-28T00:25:27,824 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] regionserver.HRegion(1676): Region close journal for 0fd1f594fbf245ea623b9011d451f99d: Waiting for close lock at 1732753527796Running coprocessor pre-close hooks at 1732753527796Disabling compacts and flushes for region at 1732753527796Disabling writes for close at 1732753527796Writing region close event to WAL at 1732753527804 (+8 ms)Running coprocessor post-close hooks at 1732753527820 (+16 ms)Closed at 1732753527824 (+4 ms) 2024-11-28T00:25:27,831 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=23}] handler.UnassignRegionHandler(157): Closed 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:27,833 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=22 updating hbase:meta row=0fd1f594fbf245ea623b9011d451f99d, regionState=CLOSED 2024-11-28T00:25:27,836 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=23, ppid=22, state=RUNNABLE, hasLock=false; CloseRegionProcedure 0fd1f594fbf245ea623b9011d451f99d, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:25:27,844 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=23, resume processing ppid=22 2024-11-28T00:25:27,844 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=23, ppid=22, state=SUCCESS, hasLock=false; CloseRegionProcedure 0fd1f594fbf245ea623b9011d451f99d, server=72c8c7f04009,36427,1732753506332 in 206 msec 2024-11-28T00:25:27,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=22, resume processing ppid=21 2024-11-28T00:25:27,848 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=22, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=0fd1f594fbf245ea623b9011d451f99d, UNASSIGN in 221 msec 2024-11-28T00:25:27,863 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:27,868 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=21 splitting 1 storefiles, region=0fd1f594fbf245ea623b9011d451f99d, threads=1 2024-11-28T00:25:27,877 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=21 splitting started for store file: 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_ for region: 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:27,889 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 56258cb69c564f4db0d329bb9cb9d267_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-28T00:25:27,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:25:27,983 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741864_1040 (size=21) 2024-11-28T00:25:27,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741864_1040 (size=21) 2024-11-28T00:25:27,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741864_1040 (size=21) 2024-11-28T00:25:27,996 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 56258cb69c564f4db0d329bb9cb9d267_SeqId_4_: NONE, but ROW specified in column family configuration 2024-11-28T00:25:28,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741865_1041 (size=21) 2024-11-28T00:25:28,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741865_1041 (size=21) 2024-11-28T00:25:28,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741865_1041 (size=21) 2024-11-28T00:25:28,046 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=21 splitting complete for store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_ for region: 0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:25:28,049 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=21 split storefiles for region 0fd1f594fbf245ea623b9011d451f99d Daughter A: [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d] storefiles, Daughter B: [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d] storefiles. 
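Note that the split does not rewrite the ~320 MB HFile loaded earlier: each daughter's cf directory receives only a small reference file named <hfile>.<parent encoded name>, later resolved as the -top or -bottom half of the parent file when the daughter opens. A hypothetical check of this layout with the plain Hadoop FileSystem API, taking the daughter cf directory (such as the 2301728e3e74ca9b7c47c3329302d12d/cf path above) as an argument:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListDaughterStoreFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // args[0]: the daughter region's column-family directory on HDFS.
        Path cfDir = new Path(args[0]);
        FileSystem fs = cfDir.getFileSystem(conf);
        for (FileStatus status : fs.listStatus(cfDir)) {
          // Expect a single tiny entry of the form 56258cb...SeqId_4_.0fd1f594...,
          // a reference to the parent's HFile rather than a copy of the data.
          System.out.println(status.getPath() + " len=" + status.getLen());
        }
      }
    }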
2024-11-28T00:25:28,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741866_1042 (size=76) 2024-11-28T00:25:28,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741866_1042 (size=76) 2024-11-28T00:25:28,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741866_1042 (size=76) 2024-11-28T00:25:28,099 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:28,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741867_1043 (size=76) 2024-11-28T00:25:28,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741867_1043 (size=76) 2024-11-28T00:25:28,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741867_1043 (size=76) 2024-11-28T00:25:28,147 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:28,176 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-28T00:25:28,179 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/recovered.edits/6.seqid, newMaxSeqId=6, maxSeqId=-1 2024-11-28T00:25:28,184 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.","families":{"info":[{"qualifier":"regioninfo","vlen":74,"tag":[],"timestamp":"1732753528183"},{"qualifier":"splitA","vlen":75,"tag":[],"timestamp":"1732753528183"},{"qualifier":"splitB","vlen":75,"tag":[],"timestamp":"1732753528183"}]},"ts":"1732753528183"} 2024-11-28T00:25:28,184 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732753528183"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753528183"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732753528183"}]},"ts":"1732753528183"} 2024-11-28T00:25:28,184 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732753528183"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753528183"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732753528183"}]},"ts":"1732753528183"} 2024-11-28T00:25:28,204 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, ASSIGN}, {pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, ASSIGN}] 2024-11-28T00:25:28,206 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, ASSIGN 2024-11-28T00:25:28,207 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, ASSIGN 2024-11-28T00:25:28,208 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, ASSIGN; state=SPLITTING_NEW, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:25:28,209 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, ASSIGN; state=SPLITTING_NEW, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:25:28,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:25:28,358 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:25:28,359 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=2301728e3e74ca9b7c47c3329302d12d, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:28,359 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=83f789a3ea94875e436296f93aa0d60a, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:28,363 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=24, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, ASSIGN because future has completed 2024-11-28T00:25:28,363 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2301728e3e74ca9b7c47c3329302d12d, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:25:28,364 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=25, ppid=21, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, ASSIGN because future has completed 2024-11-28T00:25:28,365 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83f789a3ea94875e436296f93aa0d60a, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:25:28,522 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 2024-11-28T00:25:28,522 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7752): Opening region: {ENCODED => 83f789a3ea94875e436296f93aa0d60a, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a.', STARTKEY => '5', ENDKEY => ''} 2024-11-28T00:25:28,523 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. service=AccessControlService 2024-11-28T00:25:28,523 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
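The daughter region registers the AccessControlService coprocessor endpoint as it opens (service=AccessControlService above). The AccessController is normally wired in through configuration rather than code; a sketch of the usual settings, given here as an assumption about typical security configuration and not read from this particular test:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class AccessControllerConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enable authorization and load the AccessController at master, region server and
        // region level, which exposes the AccessControlService endpoint on every region.
        conf.set("hbase.security.authorization", "true");
        conf.set("hbase.coprocessor.master.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.region.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        conf.set("hbase.coprocessor.regionserver.classes",
            "org.apache.hadoop.hbase.security.access.AccessController");
        System.out.println(conf.get("hbase.coprocessor.region.classes"));
      }
    }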
2024-11-28T00:25:28,524 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,524 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:28,524 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7794): checking encryption for 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,524 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(7797): checking classloading for 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,526 INFO [StoreOpener-83f789a3ea94875e436296f93aa0d60a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,527 INFO [StoreOpener-83f789a3ea94875e436296f93aa0d60a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 83f789a3ea94875e436296f93aa0d60a columnFamilyName cf 2024-11-28T00:25:28,527 DEBUG [StoreOpener-83f789a3ea94875e436296f93aa0d60a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:28,560 DEBUG [StoreFileOpener-83f789a3ea94875e436296f93aa0d60a-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom filter type for 56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d: NONE, but ROW specified in column family configuration 2024-11-28T00:25:28,575 DEBUG [StoreOpener-83f789a3ea94875e436296f93aa0d60a-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d->hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_-top 2024-11-28T00:25:28,576 INFO [StoreOpener-83f789a3ea94875e436296f93aa0d60a-1 {}] regionserver.HStore(327): Store=83f789a3ea94875e436296f93aa0d60a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:28,577 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1038): replaying wal for 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,578 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,586 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,588 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1048): stopping wal replay for 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,588 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1060): Cleaning up temporary data for 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,591 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1093): writing seq id for 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,592 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1114): Opened 83f789a3ea94875e436296f93aa0d60a; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59894173, jitterRate=-0.10750727355480194}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:25:28,592 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,593 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegion(1006): Region open journal for 83f789a3ea94875e436296f93aa0d60a: Running coprocessor pre-open hook at 1732753528524Writing region info on filesystem at 1732753528524Initializing all the Stores at 1732753528525 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753528526 (+1 ms)Cleaning up temporary data from old regions at 1732753528588 (+62 ms)Running coprocessor post-open hooks at 1732753528592 (+4 ms)Region opened successfully at 1732753528593 (+1 ms) 2024-11-28T00:25:28,595 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a., pid=27, masterSystemTime=1732753528517 2024-11-28T00:25:28,596 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] 
regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a.,because compaction is disabled. 2024-11-28T00:25:28,598 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 2024-11-28T00:25:28,598 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=27}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 2024-11-28T00:25:28,598 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(132): Open testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 2024-11-28T00:25:28,599 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7752): Opening region: {ENCODED => 2301728e3e74ca9b7c47c3329302d12d, NAME => 'testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d.', STARTKEY => '', ENDKEY => '5'} 2024-11-28T00:25:28,599 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=25 updating hbase:meta row=83f789a3ea94875e436296f93aa0d60a, regionState=OPEN, openSeqNum=7, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:28,599 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. service=AccessControlService 2024-11-28T00:25:28,599 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
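Once the second daughter (2301728e3e74ca9b7c47c3329302d12d, pid=26) finishes opening a few entries below, a client sees the new region boundaries ['', '5') and ['5', ''). A minimal sketch that lists them through RegionLocator, assuming the table name from this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class VerifyDaughterRegions {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName table = TableName.valueOf("testExportFileSystemStateWithSplitRegion");
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(table)) {
          // After the split, expect two locations: one ending at row "5" and one starting at "5".
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(Bytes.toStringBinary(loc.getRegion().getStartKey()) + " -> "
                + Bytes.toStringBinary(loc.getRegion().getEndKey()) + " on " + loc.getServerName());
          }
        }
      }
    }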
2024-11-28T00:25:28,600 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportFileSystemStateWithSplitRegion 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,600 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(898): Instantiated testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:25:28,600 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7794): checking encryption for 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,600 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(7797): checking classloading for 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,602 INFO [StoreOpener-2301728e3e74ca9b7c47c3329302d12d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,602 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=27, ppid=25, state=RUNNABLE, hasLock=false; OpenRegionProcedure 83f789a3ea94875e436296f93aa0d60a, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:25:28,604 INFO [StoreOpener-2301728e3e74ca9b7c47c3329302d12d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2301728e3e74ca9b7c47c3329302d12d columnFamilyName cf 2024-11-28T00:25:28,605 DEBUG [StoreOpener-2301728e3e74ca9b7c47c3329302d12d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:28,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=27, resume processing ppid=25 2024-11-28T00:25:28,607 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=27, ppid=25, state=SUCCESS, hasLock=false; OpenRegionProcedure 83f789a3ea94875e436296f93aa0d60a, server=72c8c7f04009,36427,1732753506332 in 238 msec 2024-11-28T00:25:28,609 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=25, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, ASSIGN in 403 msec 2024-11-28T00:25:28,627 DEBUG [StoreFileOpener-2301728e3e74ca9b7c47c3329302d12d-cf-1 {}] regionserver.HStoreFile(483): HFile Bloom 
filter type for 56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d: NONE, but ROW specified in column family configuration 2024-11-28T00:25:28,630 DEBUG [StoreOpener-2301728e3e74ca9b7c47c3329302d12d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d->hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_-bottom 2024-11-28T00:25:28,631 INFO [StoreOpener-2301728e3e74ca9b7c47c3329302d12d-1 {}] regionserver.HStore(327): Store=2301728e3e74ca9b7c47c3329302d12d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:25:28,631 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1038): replaying wal for 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,632 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,634 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,634 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1048): stopping wal replay for 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,634 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1060): Cleaning up temporary data for 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,636 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1093): writing seq id for 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,638 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1114): Opened 2301728e3e74ca9b7c47c3329302d12d; next sequenceid=7; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63592513, jitterRate=-0.0523977130651474}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:25:28,638 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,638 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegion(1006): Region open journal for 2301728e3e74ca9b7c47c3329302d12d: Running coprocessor pre-open hook at 1732753528600Writing region info on filesystem at 1732753528600Initializing all the Stores at 1732753528602 (+2 ms)Instantiating store for 
column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753528602Cleaning up temporary data from old regions at 1732753528634 (+32 ms)Running coprocessor post-open hooks at 1732753528638 (+4 ms)Region opened successfully at 1732753528638 2024-11-28T00:25:28,642 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d., pid=26, masterSystemTime=1732753528517 2024-11-28T00:25:28,642 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.CompactSplit(342): Ignoring compaction request for testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d.,because compaction is disabled. 2024-11-28T00:25:28,645 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 2024-11-28T00:25:28,646 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=26}] handler.AssignRegionHandler(153): Opened testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 2024-11-28T00:25:28,647 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=24 updating hbase:meta row=2301728e3e74ca9b7c47c3329302d12d, regionState=OPEN, openSeqNum=7, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:25:28,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=26, ppid=24, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2301728e3e74ca9b7c47c3329302d12d, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:25:28,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=26, resume processing ppid=24 2024-11-28T00:25:28,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=26, ppid=24, state=SUCCESS, hasLock=false; OpenRegionProcedure 2301728e3e74ca9b7c47c3329302d12d, server=72c8c7f04009,36427,1732753506332 in 290 msec 2024-11-28T00:25:28,664 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=21, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=testExportFileSystemStateWithSplitRegion, parent=0fd1f594fbf245ea623b9011d451f99d, daughterA=2301728e3e74ca9b7c47c3329302d12d, daughterB=83f789a3ea94875e436296f93aa0d60a in 1.0530 sec 2024-11-28T00:25:28,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=24, resume processing ppid=21 2024-11-28T00:25:28,665 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=24, ppid=21, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, ASSIGN in 453 msec 2024-11-28T00:25:28,752 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=21 2024-11-28T00:25:28,753 INFO [RPCClient-NioEventLoopGroup-6-4 {}] 
client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SPLIT_REGION, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:25:28,753 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportFileSystemStateWithSplitRegion,, stopping at row=testExportFileSystemStateWithSplitRegion ,, for max=2147483647 with caching=100 2024-11-28T00:25:28,760 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:25:28,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753528761 (current time:1732753528761). 2024-11-28T00:25:28,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:25:28,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportFileSystemStateWithSplitRegion VERSION not specified, setting to 2 2024-11-28T00:25:28,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:25:28,765 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@766c65ec, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:28,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:25:28,766 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:25:28,767 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:25:28,767 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:25:28,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:25:28,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e279310, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:28,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:25:28,768 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:25:28,769 
DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:28,770 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33474, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:25:28,771 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@16015601, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:28,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:28,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:28,772 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:28,774 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33834, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:28,776 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:25:28,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:25:28,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:28,776 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:28,776 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:25:28,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e4afdef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:28,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:25:28,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:25:28,779 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:25:28,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:25:28,780 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:25:28,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@61cf215f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:28,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:25:28,783 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:25:28,784 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:28,785 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:25:28,786 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a7d7154, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:25:28,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:25:28,788 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:25:28,788 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:28,790 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33836, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:25:28,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportFileSystemStateWithSplitRegion', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:25:28,797 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:25:28,799 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50734, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:25:28,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:25:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:25:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:25:28,801 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:25:28,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testExportFileSystemStateWithSplitRegion], kv [jenkins: RWXCA] 2024-11-28T00:25:28,802 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:25:28,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } 2024-11-28T00:25:28,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-28T00:25:28,806 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-28T00:25:28,807 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:25:28,809 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:25:28,812 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:25:28,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741868_1044 (size=197) 2024-11-28T00:25:28,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741868_1044 (size=197) 2024-11-28T00:25:28,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741868_1044 (size=197) 2024-11-28T00:25:28,843 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:25:28,844 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2301728e3e74ca9b7c47c3329302d12d}, {pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83f789a3ea94875e436296f93aa0d60a}] 2024-11-28T00:25:28,846 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:28,846 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:28,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-28T00:25:28,999 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=30 2024-11-28T00:25:29,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 2024-11-28T00:25:29,000 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=29 2024-11-28T00:25:29,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 2024-11-28T00:25:29,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.HRegion(2603): Flush status journal for 83f789a3ea94875e436296f93aa0d60a: 2024-11-28T00:25:29,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. for snapshot-testExportFileSystemStateWithSplitRegion completed. 2024-11-28T00:25:29,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.HRegion(2603): Flush status journal for 2301728e3e74ca9b7c47c3329302d12d: 2024-11-28T00:25:29,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. for snapshot-testExportFileSystemStateWithSplitRegion completed. 
2024-11-28T00:25:29,000 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:25:29,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(241): Storing 'testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d.' region-info for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:25:29,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d->hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_-top] hfiles 2024-11-28T00:25:29,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d->hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_-bottom] hfiles 2024-11-28T00:25:29,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,001 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d for snapshot=snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,017 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741869_1045 
(size=182) 2024-11-28T00:25:29,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741869_1045 (size=182) 2024-11-28T00:25:29,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741869_1045 (size=182) 2024-11-28T00:25:29,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 2024-11-28T00:25:29,020 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=30}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=30 2024-11-28T00:25:29,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=30 2024-11-28T00:25:29,021 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:29,021 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=30, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:25:29,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741870_1046 (size=182) 2024-11-28T00:25:29,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741870_1046 (size=182) 2024-11-28T00:25:29,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741870_1046 (size=182) 2024-11-28T00:25:29,025 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=30, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 83f789a3ea94875e436296f93aa0d60a in 178 msec 2024-11-28T00:25:29,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 
2024-11-28T00:25:29,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=29}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=29 2024-11-28T00:25:29,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=29 2024-11-28T00:25:29,026 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportFileSystemStateWithSplitRegion on region 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:29,027 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=29, ppid=28, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:25:29,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=29, resume processing ppid=28 2024-11-28T00:25:29,030 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=29, ppid=28, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2301728e3e74ca9b7c47c3329302d12d in 184 msec 2024-11-28T00:25:29,030 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:25:29,032 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 2024-11-28T00:25:29,032 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:25:29,032 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:25:29,034 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_] hfiles 2024-11-28T00:25:29,034 DEBUG [SplitRegionsSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_ 2024-11-28T00:25:29,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741871_1047 (size=129) 2024-11-28T00:25:29,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741871_1047 (size=129) 2024-11-28T00:25:29,075 INFO [SplitRegionsSnapshotPool-pool-0 {}] procedure.SnapshotProcedure$1(378): take snapshot region={ENCODED => 0fd1f594fbf245ea623b9011d451f99d, NAME => 'testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.', STARTKEY => '', ENDKEY => '', OFFLINE => true, SPLIT => true}, table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,075 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741871_1047 (size=129) 2024-11-28T00:25:29,077 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:25:29,079 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:25:29,079 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,080 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,122 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-28T00:25:29,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741872_1048 (size=891) 2024-11-28T00:25:29,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741872_1048 (size=891) 2024-11-28T00:25:29,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741872_1048 (size=891) 2024-11-28T00:25:29,218 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:25:29,239 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:25:29,239 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,242 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=28, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 
} execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:25:29,242 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 }, snapshot procedure id = 28 2024-11-28T00:25:29,244 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=28, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=28, snapshot={ ss=snapshot-testExportFileSystemStateWithSplitRegion table=testExportFileSystemStateWithSplitRegion type=FLUSH ttl=0 } in 439 msec 2024-11-28T00:25:29,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=28 2024-11-28T00:25:29,431 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:25:29,432 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431 2024-11-28T00:25:29,432 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:29,485 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:25:29,485 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,492 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
2024-11-28T00:25:29,517 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431/.hbase-snapshot/.tmp/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:25:29,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741874_1050 (size=891) 2024-11-28T00:25:29,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741873_1049 (size=197) 2024-11-28T00:25:29,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741873_1049 (size=197) 2024-11-28T00:25:29,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741873_1049 (size=197) 2024-11-28T00:25:29,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741874_1050 (size=891) 2024-11-28T00:25:29,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741874_1050 (size=891) 2024-11-28T00:25:29,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:29,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:29,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,114 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-4928266959361133283.jar 2024-11-28T00:25:31,115 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,116 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,205 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-8658866910979740745.jar 
2024-11-28T00:25:31,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,206 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,207 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:25:31,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:25:31,208 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:25:31,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:25:31,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:25:31,209 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:25:31,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:25:31,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:25:31,210 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:25:31,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:25:31,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:25:31,211 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:25:31,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:25:31,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:25:31,214 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:25:31,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:25:31,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:25:31,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:25:31,215 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:25:31,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741875_1051 (size=131440) 2024-11-28T00:25:31,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741875_1051 (size=131440) 2024-11-28T00:25:31,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741875_1051 (size=131440) 2024-11-28T00:25:31,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741876_1052 (size=4188619) 2024-11-28T00:25:31,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741876_1052 (size=4188619) 2024-11-28T00:25:31,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741876_1052 (size=4188619) 2024-11-28T00:25:31,459 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741877_1053 (size=1323991) 2024-11-28T00:25:31,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741877_1053 (size=1323991) 2024-11-28T00:25:31,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741877_1053 (size=1323991) 2024-11-28T00:25:31,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741878_1054 (size=903927) 2024-11-28T00:25:31,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741878_1054 (size=903927) 2024-11-28T00:25:31,484 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741878_1054 (size=903927) 2024-11-28T00:25:31,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741879_1055 (size=8360083) 2024-11-28T00:25:31,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741879_1055 (size=8360083) 2024-11-28T00:25:31,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741879_1055 (size=8360083) 2024-11-28T00:25:31,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741880_1056 (size=1877034) 2024-11-28T00:25:31,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741880_1056 (size=1877034) 2024-11-28T00:25:31,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741880_1056 (size=1877034) 2024-11-28T00:25:32,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): 
BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741881_1057 (size=77835) 2024-11-28T00:25:32,001 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741881_1057 (size=77835) 2024-11-28T00:25:32,002 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741881_1057 (size=77835) 2024-11-28T00:25:32,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741882_1058 (size=30949) 2024-11-28T00:25:32,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741882_1058 (size=30949) 2024-11-28T00:25:32,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741882_1058 (size=30949) 2024-11-28T00:25:32,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741883_1059 (size=1597213) 2024-11-28T00:25:32,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741883_1059 (size=1597213) 2024-11-28T00:25:32,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741883_1059 (size=1597213) 2024-11-28T00:25:32,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741884_1060 (size=6424743) 2024-11-28T00:25:32,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741884_1060 (size=6424743) 2024-11-28T00:25:32,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741884_1060 (size=6424743) 2024-11-28T00:25:32,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741885_1061 (size=440957) 2024-11-28T00:25:32,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741885_1061 (size=440957) 2024-11-28T00:25:32,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741885_1061 (size=440957) 2024-11-28T00:25:32,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741886_1062 (size=4695811) 2024-11-28T00:25:32,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741886_1062 (size=4695811) 2024-11-28T00:25:32,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741886_1062 (size=4695811) 2024-11-28T00:25:32,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741887_1063 (size=232957) 2024-11-28T00:25:32,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741887_1063 (size=232957) 2024-11-28T00:25:32,136 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741887_1063 (size=232957) 2024-11-28T00:25:32,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741888_1064 (size=127628) 2024-11-28T00:25:32,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741888_1064 (size=127628) 2024-11-28T00:25:32,147 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741888_1064 (size=127628) 2024-11-28T00:25:32,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741889_1065 (size=20406) 2024-11-28T00:25:32,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741889_1065 (size=20406) 2024-11-28T00:25:32,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741889_1065 (size=20406) 2024-11-28T00:25:32,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741890_1066 (size=5175431) 2024-11-28T00:25:32,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741890_1066 (size=5175431) 2024-11-28T00:25:32,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741890_1066 (size=5175431) 2024-11-28T00:25:32,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741891_1067 (size=217634) 2024-11-28T00:25:32,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741891_1067 (size=217634) 2024-11-28T00:25:32,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741891_1067 (size=217634) 2024-11-28T00:25:32,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741892_1068 (size=1832290) 2024-11-28T00:25:32,710 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741892_1068 (size=1832290) 2024-11-28T00:25:32,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741892_1068 (size=1832290) 2024-11-28T00:25:32,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741893_1069 (size=322274) 2024-11-28T00:25:32,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741893_1069 (size=322274) 2024-11-28T00:25:32,761 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741893_1069 (size=322274) 2024-11-28T00:25:32,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741894_1070 (size=503880) 2024-11-28T00:25:32,829 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741894_1070 (size=503880) 2024-11-28T00:25:32,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741894_1070 (size=503880) 2024-11-28T00:25:32,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741895_1071 (size=29229) 2024-11-28T00:25:32,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741895_1071 (size=29229) 2024-11-28T00:25:32,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741895_1071 (size=29229) 2024-11-28T00:25:32,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741896_1072 (size=24096) 2024-11-28T00:25:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741896_1072 (size=24096) 2024-11-28T00:25:32,947 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741896_1072 (size=24096) 2024-11-28T00:25:33,369 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:25:33,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741897_1073 (size=111872) 2024-11-28T00:25:33,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741897_1073 (size=111872) 2024-11-28T00:25:33,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741897_1073 (size=111872) 2024-11-28T00:25:33,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741898_1074 (size=45609) 2024-11-28T00:25:33,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741898_1074 (size=45609) 2024-11-28T00:25:33,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741898_1074 (size=45609) 2024-11-28T00:25:33,947 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:25:34,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741899_1075 (size=136454) 2024-11-28T00:25:34,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741899_1075 (size=136454) 2024-11-28T00:25:34,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741899_1075 (size=136454) 2024-11-28T00:25:34,324 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. 
See Job or Job#setJar(String). 2024-11-28T00:25:34,333 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snapshot-testExportFileSystemStateWithSplitRegion' hfile list 2024-11-28T00:25:34,340 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=0fd1f594fbf245ea623b9011d451f99d-56258cb69c564f4db0d329bb9cb9d267_SeqId_4_. 2024-11-28T00:25:34,340 DEBUG [Time-limited test {}] snapshot.ExportSnapshot$1(689): Skip the existing file: cf/testExportFileSystemStateWithSplitRegion=0fd1f594fbf245ea623b9011d451f99d-56258cb69c564f4db0d329bb9cb9d267_SeqId_4_. 2024-11-28T00:25:34,341 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=305.6 M 2024-11-28T00:25:34,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741900_1076 (size=244) 2024-11-28T00:25:34,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741900_1076 (size=244) 2024-11-28T00:25:34,383 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741900_1076 (size=244) 2024-11-28T00:25:34,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741901_1077 (size=17) 2024-11-28T00:25:34,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741901_1077 (size=17) 2024-11-28T00:25:34,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741901_1077 (size=17) 2024-11-28T00:25:34,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741902_1078 (size=304050) 2024-11-28T00:25:34,600 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741902_1078 (size=304050) 2024-11-28T00:25:34,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741902_1078 (size=304050) 2024-11-28T00:25:35,024 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:25:35,025 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:25:35,373 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0001_000001 (auth:SIMPLE) from 127.0.0.1:40350 2024-11-28T00:25:45,818 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0001_000001 (auth:SIMPLE) from 127.0.0.1:51990 2024-11-28T00:25:46,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741903_1079 (size=349748) 2024-11-28T00:25:46,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741903_1079 (size=349748) 2024-11-28T00:25:46,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741903_1079 (size=349748) 2024-11-28T00:25:48,351 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0001_000001 (auth:SIMPLE) from 127.0.0.1:50146 2024-11-28T00:25:50,604 INFO [master/72c8c7f04009:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-28T00:25:50,604 INFO [master/72c8c7f04009:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-28T00:26:01,929 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7e74f876de2bcc196d763b50943e18f2, had cached 0 bytes from a total of 6418 2024-11-28T00:26:01,941 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 875b43592001846889c7b989a9e36642, had cached 0 bytes from a total of 15277 2024-11-28T00:26:03,948 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-28T00:26:10,392 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 7e74f876de2bcc196d763b50943e18f2 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:26:10,393 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 91195ca5c70c54af0f84245ffc432b72 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:26:10,394 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 875b43592001846889c7b989a9e36642 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:26:13,524 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 83f789a3ea94875e436296f93aa0d60a, had cached 0 bytes from a total of 320414712 2024-11-28T00:26:13,600 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2301728e3e74ca9b7c47c3329302d12d, had cached 0 bytes from a total of 320414712 2024-11-28T00:26:32,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741904_1080 (size=134217728) 2024-11-28T00:26:32,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741904_1080 (size=134217728) 2024-11-28T00:26:32,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741904_1080 (size=134217728) 2024-11-28T00:26:33,948 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:26:46,930 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 7e74f876de2bcc196d763b50943e18f2, had cached 0 bytes from a total of 6418 2024-11-28T00:26:46,942 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 875b43592001846889c7b989a9e36642, had cached 0 bytes from a total of 15277 2024-11-28T00:26:58,524 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 83f789a3ea94875e436296f93aa0d60a, had cached 0 bytes from a total of 320414712 2024-11-28T00:26:58,600 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 2301728e3e74ca9b7c47c3329302d12d, had cached 0 bytes from a total of 320414712 2024-11-28T00:27:03,948 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-28T00:27:11,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741905_1081 (size=134217728) 2024-11-28T00:27:11,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741905_1081 (size=134217728) 2024-11-28T00:27:11,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741905_1081 (size=134217728) 2024-11-28T00:27:26,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741906_1082 (size=51979256) 2024-11-28T00:27:26,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741906_1082 (size=51979256) 2024-11-28T00:27:26,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741906_1082 (size=51979256) 2024-11-28T00:27:27,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741907_1083 (size=17520) 2024-11-28T00:27:27,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741907_1083 (size=17520) 2024-11-28T00:27:27,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741907_1083 (size=17520) 2024-11-28T00:27:27,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741908_1084 (size=482) 2024-11-28T00:27:27,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741908_1084 (size=482) 2024-11-28T00:27:27,072 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741908_1084 (size=482) 2024-11-28T00:27:27,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741909_1085 (size=17520) 2024-11-28T00:27:27,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741909_1085 (size=17520) 2024-11-28T00:27:27,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741909_1085 (size=17520) 2024-11-28T00:27:27,121 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0001/container_1732753513471_0001_01_000002/launch_container.sh] 2024-11-28T00:27:27,122 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0001/container_1732753513471_0001_01_000002/container_tokens] 2024-11-28T00:27:27,122 WARN 
[ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0001/container_1732753513471_0001_01_000002/sysfs] 2024-11-28T00:27:27,559 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741910_1086 (size=349748) 2024-11-28T00:27:27,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741910_1086 (size=349748) 2024-11-28T00:27:27,561 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741910_1086 (size=349748) 2024-11-28T00:27:28,904 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:27:28,907 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-28T00:27:28,918 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:28,919 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:27:28,919 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:27:28,919 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:28,920 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-28T00:27:28,920 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-28T00:27:28,920 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:28,921 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/.snapshotinfo 2024-11-28T00:27:28,921 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753529431/.hbase-snapshot/snapshot-testExportFileSystemStateWithSplitRegion/data.manifest 2024-11-28T00:27:28,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:28,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=31, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:28,947 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-28T00:27:28,947 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753648946"}]},"ts":"1732753648946"} 2024-11-28T00:27:28,949 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52065, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:28,951 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-28T00:27:28,951 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-28T00:27:28,954 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=32, ppid=31, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion}] 2024-11-28T00:27:28,958 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, UNASSIGN}, {pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, UNASSIGN}] 2024-11-28T00:27:28,960 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, UNASSIGN 2024-11-28T00:27:28,960 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, UNASSIGN 2024-11-28T00:27:28,961 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=83f789a3ea94875e436296f93aa0d60a, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:27:28,961 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=2301728e3e74ca9b7c47c3329302d12d, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:27:28,963 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=34, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, UNASSIGN because future has completed 2024-11-28T00:27:28,964 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:27:28,964 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83f789a3ea94875e436296f93aa0d60a, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:27:28,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=33, ppid=32, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, UNASSIGN because future has completed 2024-11-28T00:27:28,966 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:27:28,966 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2301728e3e74ca9b7c47c3329302d12d, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:27:28,999 ERROR [ProcedureDispatcherTimeoutThread {}] procedure2.RemoteProcedureDispatcher$TimeoutExecutorThread(331): DelayQueue for RemoteProcedureDispatcher is not empty when timed waiting elapsed. If this is repeated consistently, it means no element is getting expired from the queue and it might freeze the system. Queue: [containedObject=72c8c7f04009,36427,1732753506332, timeout=1732753649118, delay=118, operations=[pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83f789a3ea94875e436296f93aa0d60a, server=72c8c7f04009,36427,1732753506332, pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2301728e3e74ca9b7c47c3329302d12d, server=72c8c7f04009,36427,1732753506332]] 2024-11-28T00:27:29,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-28T00:27:29,120 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38193, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:27:29,120 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(122): Close 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:27:29,121 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:27:29,121 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1722): Closing 83f789a3ea94875e436296f93aa0d60a, disabling compactions & flushes 2024-11-28T00:27:29,121 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 
2024-11-28T00:27:29,121 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 2024-11-28T00:27:29,121 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. after waiting 0 ms 2024-11-28T00:27:29,121 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 2024-11-28T00:27:29,128 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-28T00:27:29,129 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:27:29,129 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a. 2024-11-28T00:27:29,129 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] regionserver.HRegion(1676): Region close journal for 83f789a3ea94875e436296f93aa0d60a: Waiting for close lock at 1732753649121Running coprocessor pre-close hooks at 1732753649121Disabling compacts and flushes for region at 1732753649121Disabling writes for close at 1732753649121Writing region close event to WAL at 1732753649122 (+1 ms)Running coprocessor post-close hooks at 1732753649129 (+7 ms)Closed at 1732753649129 2024-11-28T00:27:29,132 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=35}] handler.UnassignRegionHandler(157): Closed 83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:27:29,132 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(122): Close 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:27:29,132 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:27:29,132 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1722): Closing 2301728e3e74ca9b7c47c3329302d12d, disabling compactions & flushes 2024-11-28T00:27:29,132 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1755): Closing region testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 2024-11-28T00:27:29,132 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1776): Time limited wait for close lock on testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 
2024-11-28T00:27:29,132 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1843): Acquired close lock on testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. after waiting 0 ms 2024-11-28T00:27:29,132 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1853): Updates disabled for region testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 2024-11-28T00:27:29,132 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=34 updating hbase:meta row=83f789a3ea94875e436296f93aa0d60a, regionState=CLOSED 2024-11-28T00:27:29,135 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=35, ppid=34, state=RUNNABLE, hasLock=false; CloseRegionProcedure 83f789a3ea94875e436296f93aa0d60a, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:27:29,139 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/recovered.edits/10.seqid, newMaxSeqId=10, maxSeqId=6 2024-11-28T00:27:29,140 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:27:29,141 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1973): Closed testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d. 
2024-11-28T00:27:29,141 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] regionserver.HRegion(1676): Region close journal for 2301728e3e74ca9b7c47c3329302d12d: Waiting for close lock at 1732753649132Running coprocessor pre-close hooks at 1732753649132Disabling compacts and flushes for region at 1732753649132Disabling writes for close at 1732753649132Writing region close event to WAL at 1732753649133 (+1 ms)Running coprocessor post-close hooks at 1732753649140 (+7 ms)Closed at 1732753649141 (+1 ms) 2024-11-28T00:27:29,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=35, resume processing ppid=34 2024-11-28T00:27:29,142 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=35, ppid=34, state=SUCCESS, hasLock=false; CloseRegionProcedure 83f789a3ea94875e436296f93aa0d60a, server=72c8c7f04009,36427,1732753506332 in 173 msec 2024-11-28T00:27:29,144 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=36}] handler.UnassignRegionHandler(157): Closed 2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:27:29,145 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=34, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=83f789a3ea94875e436296f93aa0d60a, UNASSIGN in 184 msec 2024-11-28T00:27:29,145 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=33 updating hbase:meta row=2301728e3e74ca9b7c47c3329302d12d, regionState=CLOSED 2024-11-28T00:27:29,147 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=36, ppid=33, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2301728e3e74ca9b7c47c3329302d12d, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:27:29,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=36, resume processing ppid=33 2024-11-28T00:27:29,151 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=36, ppid=33, state=SUCCESS, hasLock=false; CloseRegionProcedure 2301728e3e74ca9b7c47c3329302d12d, server=72c8c7f04009,36427,1732753506332 in 182 msec 2024-11-28T00:27:29,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=33, resume processing ppid=32 2024-11-28T00:27:29,154 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=33, ppid=32, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportFileSystemStateWithSplitRegion, region=2301728e3e74ca9b7c47c3329302d12d, UNASSIGN in 193 msec 2024-11-28T00:27:29,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=32, resume processing ppid=31 2024-11-28T00:27:29,158 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=32, ppid=31, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportFileSystemStateWithSplitRegion in 202 msec 2024-11-28T00:27:29,160 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753649160"}]},"ts":"1732753649160"} 2024-11-28T00:27:29,162 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-28T00:27:29,162 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set 
testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-28T00:27:29,165 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=31, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportFileSystemStateWithSplitRegion in 224 msec 2024-11-28T00:27:29,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=31 2024-11-28T00:27:29,262 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:27:29,266 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,273 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=37, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,274 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,276 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=37, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,276 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42577, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:29,279 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39699, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=ClientService 2024-11-28T00:27:29,281 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,285 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, 
baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,286 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:27:29,286 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:27:29,286 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:27:29,287 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:27:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,288 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:29,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:29,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:29,289 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:29,289 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:27:29,289 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:27:29,290 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:27:29,291 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:29,291 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:29,291 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:29,291 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithSplitRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:29,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-28T00:27:29,296 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/recovered.edits] 2024-11-28T00:27:29,296 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/recovered.edits] 2024-11-28T00:27:29,296 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/recovered.edits] 2024-11-28T00:27:29,307 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_ to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_ 2024-11-28T00:27:29,307 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:27:29,307 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/cf/56258cb69c564f4db0d329bb9cb9d267_SeqId_4_.0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:27:29,313 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/recovered.edits/6.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d/recovered.edits/6.seqid 2024-11-28T00:27:29,313 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/recovered.edits/10.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d/recovered.edits/10.seqid 2024-11-28T00:27:29,314 DEBUG [HFileArchiver-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/0fd1f594fbf245ea623b9011d451f99d 2024-11-28T00:27:29,314 DEBUG [HFileArchiver-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/2301728e3e74ca9b7c47c3329302d12d 2024-11-28T00:27:29,314 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/recovered.edits/10.seqid to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a/recovered.edits/10.seqid 2024-11-28T00:27:29,315 DEBUG [HFileArchiver-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportFileSystemStateWithSplitRegion/83f789a3ea94875e436296f93aa0d60a 2024-11-28T00:27:29,315 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testExportFileSystemStateWithSplitRegion regions 2024-11-28T00:27:29,322 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=37, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,327 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36427 {}] util.ReflectedFunctionCache(97): Populated cache for org.apache.hadoop.hbase.filter.KeyOnlyFilter in 0ms 2024-11-28T00:27:29,335 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 3 rows of testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-28T00:27:29,341 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-28T00:27:29,343 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=37, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,343 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testExportFileSystemStateWithSplitRegion' from region states. 2024-11-28T00:27:29,343 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753649343"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:29,344 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753649343"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:29,344 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753649343"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:29,348 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 3 regions from META 2024-11-28T00:27:29,348 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 0fd1f594fbf245ea623b9011d451f99d, NAME => 'testExportFileSystemStateWithSplitRegion,,1732753519531.0fd1f594fbf245ea623b9011d451f99d.', STARTKEY => '', ENDKEY => ''}, {ENCODED => 2301728e3e74ca9b7c47c3329302d12d, NAME => 'testExportFileSystemStateWithSplitRegion,,1732753527607.2301728e3e74ca9b7c47c3329302d12d.', STARTKEY => '', ENDKEY => '5'}, {ENCODED => 83f789a3ea94875e436296f93aa0d60a, NAME => 'testExportFileSystemStateWithSplitRegion,5,1732753527607.83f789a3ea94875e436296f93aa0d60a.', STARTKEY => '5', ENDKEY => ''}] 2024-11-28T00:27:29,348 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): 
Marking 'testExportFileSystemStateWithSplitRegion' as deleted. 2024-11-28T00:27:29,349 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753649348"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:29,352 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testExportFileSystemStateWithSplitRegion state from META 2024-11-28T00:27:29,354 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=37, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,356 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=37, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportFileSystemStateWithSplitRegion in 87 msec 2024-11-28T00:27:29,401 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=37 2024-11-28T00:27:29,403 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,404 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:27:29,405 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,407 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=38, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-28T00:27:29,411 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753649410"}]},"ts":"1732753649410"} 2024-11-28T00:27:29,413 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLING in hbase:meta 2024-11-28T00:27:29,413 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLING 2024-11-28T00:27:29,414 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=39, ppid=38, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion}] 2024-11-28T00:27:29,416 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, UNASSIGN}, {pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, UNASSIGN}] 2024-11-28T00:27:29,417 INFO [PEWorker-1 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, UNASSIGN 2024-11-28T00:27:29,417 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, UNASSIGN 2024-11-28T00:27:29,418 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=875b43592001846889c7b989a9e36642, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:27:29,418 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=7e74f876de2bcc196d763b50943e18f2, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:27:29,420 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=41, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, UNASSIGN because future has completed 2024-11-28T00:27:29,421 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:27:29,421 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 875b43592001846889c7b989a9e36642, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:27:29,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=40, ppid=39, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, UNASSIGN because future has completed 2024-11-28T00:27:29,422 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:27:29,423 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7e74f876de2bcc196d763b50943e18f2, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:27:29,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-28T00:27:29,575 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34539, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:27:29,576 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(122): Close 875b43592001846889c7b989a9e36642 2024-11-28T00:27:29,577 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:27:29,577 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] 
regionserver.HRegion(1722): Closing 875b43592001846889c7b989a9e36642, disabling compactions & flushes 2024-11-28T00:27:29,577 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:27:29,577 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:27:29,577 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. after waiting 0 ms 2024-11-28T00:27:29,577 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 2024-11-28T00:27:29,578 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(122): Close 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:27:29,578 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:27:29,578 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1722): Closing 7e74f876de2bcc196d763b50943e18f2, disabling compactions & flushes 2024-11-28T00:27:29,579 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:27:29,579 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:27:29,579 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. after waiting 0 ms 2024-11-28T00:27:29,579 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 
2024-11-28T00:27:29,589 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:27:29,590 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:27:29,590 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2. 2024-11-28T00:27:29,590 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] regionserver.HRegion(1676): Region close journal for 7e74f876de2bcc196d763b50943e18f2: Waiting for close lock at 1732753649578Running coprocessor pre-close hooks at 1732753649578Disabling compacts and flushes for region at 1732753649578Disabling writes for close at 1732753649579 (+1 ms)Writing region close event to WAL at 1732753649584 (+5 ms)Running coprocessor post-close hooks at 1732753649590 (+6 ms)Closed at 1732753649590 2024-11-28T00:27:29,592 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:27:29,593 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=43}] handler.UnassignRegionHandler(157): Closed 7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:27:29,593 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:27:29,594 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642. 
2024-11-28T00:27:29,594 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] regionserver.HRegion(1676): Region close journal for 875b43592001846889c7b989a9e36642: Waiting for close lock at 1732753649577Running coprocessor pre-close hooks at 1732753649577Disabling compacts and flushes for region at 1732753649577Disabling writes for close at 1732753649577Writing region close event to WAL at 1732753649578 (+1 ms)Running coprocessor post-close hooks at 1732753649593 (+15 ms)Closed at 1732753649594 (+1 ms) 2024-11-28T00:27:29,594 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=40 updating hbase:meta row=7e74f876de2bcc196d763b50943e18f2, regionState=CLOSED 2024-11-28T00:27:29,596 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=43, ppid=40, state=RUNNABLE, hasLock=false; CloseRegionProcedure 7e74f876de2bcc196d763b50943e18f2, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:27:29,597 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=42}] handler.UnassignRegionHandler(157): Closed 875b43592001846889c7b989a9e36642 2024-11-28T00:27:29,610 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=41 updating hbase:meta row=875b43592001846889c7b989a9e36642, regionState=CLOSED 2024-11-28T00:27:29,613 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=42, ppid=41, state=RUNNABLE, hasLock=false; CloseRegionProcedure 875b43592001846889c7b989a9e36642, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:27:29,615 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=43, resume processing ppid=40 2024-11-28T00:27:29,616 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=43, ppid=40, state=SUCCESS, hasLock=false; CloseRegionProcedure 7e74f876de2bcc196d763b50943e18f2, server=72c8c7f04009,36427,1732753506332 in 189 msec 2024-11-28T00:27:29,618 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=40, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=7e74f876de2bcc196d763b50943e18f2, UNASSIGN in 200 msec 2024-11-28T00:27:29,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=42, resume processing ppid=41 2024-11-28T00:27:29,618 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=42, ppid=41, state=SUCCESS, hasLock=false; CloseRegionProcedure 875b43592001846889c7b989a9e36642, server=72c8c7f04009,43335,1732753506004 in 194 msec 2024-11-28T00:27:29,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=41, resume processing ppid=39 2024-11-28T00:27:29,620 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=41, ppid=39, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSplitRegion, region=875b43592001846889c7b989a9e36642, UNASSIGN in 202 msec 2024-11-28T00:27:29,623 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=39, resume processing ppid=38 2024-11-28T00:27:29,623 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=39, ppid=38, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 207 msec 
2024-11-28T00:27:29,626 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753649625"}]},"ts":"1732753649625"} 2024-11-28T00:27:29,629 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSplitRegion, state=DISABLED in hbase:meta 2024-11-28T00:27:29,629 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSplitRegion to state=DISABLED 2024-11-28T00:27:29,631 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=38, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 224 msec 2024-11-28T00:27:29,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=38 2024-11-28T00:27:29,731 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:27:29,732 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,733 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,734 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=44, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,735 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=44, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,738 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,740 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:27:29,740 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642 2024-11-28T00:27:29,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,741 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,742 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,742 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:27:29,742 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:27:29,742 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:27:29,742 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSplitRegion with data PBUF 2024-11-28T00:27:29,743 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/recovered.edits] 2024-11-28T00:27:29,743 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/recovered.edits] 2024-11-28T00:27:29,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:29,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, 
quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:29,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:29,744 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:29,745 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-28T00:27:29,749 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/cf/c9bd37999cd5409892cf834931c29052 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/cf/c9bd37999cd5409892cf834931c29052 2024-11-28T00:27:29,749 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/cf/a3f5883589fd4ed8be2745857322dd92 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/cf/a3f5883589fd4ed8be2745857322dd92 2024-11-28T00:27:29,757 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2/recovered.edits/9.seqid 2024-11-28T00:27:29,758 DEBUG [HFileArchiver-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:27:29,759 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642/recovered.edits/9.seqid 2024-11-28T00:27:29,760 DEBUG [HFileArchiver-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSplitRegion/875b43592001846889c7b989a9e36642 2024-11-28T00:27:29,760 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSplitRegion regions 2024-11-28T00:27:29,761 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-11-28T00:27:29,762 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf] 2024-11-28T00:27:29,766 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128fd128a202d0742e9ace6f5266449fae6_875b43592001846889c7b989a9e36642 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/c4ca4238a0b923820dcc509a6f75849b20241128fd128a202d0742e9ace6f5266449fae6_875b43592001846889c7b989a9e36642 2024-11-28T00:27:29,768 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241128ef7f67eb3f95433290fedc0436e3075b_7e74f876de2bcc196d763b50943e18f2 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c/cf/d41d8cd98f00b204e9800998ecf8427e20241128ef7f67eb3f95433290fedc0436e3075b_7e74f876de2bcc196d763b50943e18f2 2024-11-28T00:27:29,769 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSplitRegion/8bc6791f9a8595eff0f40af260f3f56c 2024-11-28T00:27:29,771 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=44, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,775 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSplitRegion from hbase:meta 2024-11-28T00:27:29,778 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 
'testtb-testExportFileSystemStateWithSplitRegion' descriptor. 2024-11-28T00:27:29,780 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=44, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,780 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSplitRegion' from region states. 2024-11-28T00:27:29,780 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753649780"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:29,780 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753649780"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:29,783 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:27:29,783 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 7e74f876de2bcc196d763b50943e18f2, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,,1732753516371.7e74f876de2bcc196d763b50943e18f2.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 875b43592001846889c7b989a9e36642, NAME => 'testtb-testExportFileSystemStateWithSplitRegion,1,1732753516371.875b43592001846889c7b989a9e36642.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:27:29,783 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSplitRegion' as deleted. 
2024-11-28T00:27:29,783 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSplitRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753649783"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:29,786 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSplitRegion state from META 2024-11-28T00:27:29,786 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=44, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,788 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=44, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSplitRegion in 54 msec 2024-11-28T00:27:29,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=44 2024-11-28T00:27:29,852 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,852 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSplitRegion completed 2024-11-28T00:27:29,872 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-28T00:27:29,874 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-28T00:27:29,879 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,881 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSplitRegion" type: DISABLED 2024-11-28T00:27:29,883 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:29,915 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSplitRegion Thread=750 (was 712) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_623941655_1 at /127.0.0.1:48184 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:38280 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-1400 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) 
app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: HFileArchiver-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #0 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.read1(BufferedReader.java:213) java.base@17.0.11/java.io.BufferedReader.read(BufferedReader.java:287) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.parseExecResult(Shell.java:1295) app//org.apache.hadoop.util.Shell.runCommand(Shell.java:1054) app//org.apache.hadoop.util.Shell.run(Shell.java:959) app//org.apache.hadoop.util.Shell$ShellCommandExecutor.execute(Shell.java:1282) app//org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor.launchContainer(DefaultContainerExecutor.java:349) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.launchContainer(ContainerLaunch.java:600) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:388) app//org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch.call(ContainerLaunch.java:105) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:33538 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: RPCClient-NioEventLoopGroup-6-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-11 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-4-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_623941655_1 at /127.0.0.1:33530 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 16860) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_COMPACTED_FILES_DISCHARGER-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DeletionService #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:48220 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:38599 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RSProcedureDispatcher-pool-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: zk-permission-watcher-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Container metrics unregistration java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-3 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38599 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=765 (was 755) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=465 (was 343) - SystemLoadAverage LEAK? -, ProcessCount=19 (was 11) - ProcessCount LEAK? 
-, AvailableMemoryMB=1176 (was 7495) 2024-11-28T00:27:29,916 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=750 is superior to 500 2024-11-28T00:27:29,934 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=750, OpenFileDescriptor=765, MaxFileDescriptor=1048576, SystemLoadAverage=465, ProcessCount=19, AvailableMemoryMB=1172 2024-11-28T00:27:29,934 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=750 is superior to 500 2024-11-28T00:27:29,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:27:29,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:27:29,940 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:27:29,940 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithTargetName" procId is: 45 2024-11-28T00:27:29,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-28T00:27:29,942 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:27:29,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741911_1087 (size=442) 2024-11-28T00:27:29,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741911_1087 (size=442) 2024-11-28T00:27:29,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741911_1087 (size=442) 2024-11-28T00:27:29,954 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 07a29275a235142b029c2db26d7133da, NAME => 'testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:27:29,954 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => eb1846f897c221e883336771cdcfcd5a, NAME => 'testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithTargetName', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:27:29,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741913_1089 (size=67) 2024-11-28T00:27:29,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741913_1089 (size=67) 2024-11-28T00:27:29,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741913_1089 (size=67) 2024-11-28T00:27:29,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741912_1088 (size=67) 2024-11-28T00:27:29,965 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:29,965 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741912_1088 (size=67) 2024-11-28T00:27:29,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1722): Closing eb1846f897c221e883336771cdcfcd5a, disabling compactions & flushes 2024-11-28T00:27:29,966 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:29,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:29,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741912_1088 (size=67) 2024-11-28T00:27:29,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 
after waiting 0 ms 2024-11-28T00:27:29,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:29,966 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:29,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-1 {}] regionserver.HRegion(1676): Region close journal for eb1846f897c221e883336771cdcfcd5a: Waiting for close lock at 1732753649966Disabling compacts and flushes for region at 1732753649966Disabling writes for close at 1732753649966Writing region close event to WAL at 1732753649966Closed at 1732753649966 2024-11-28T00:27:29,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:29,966 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1722): Closing 07a29275a235142b029c2db26d7133da, disabling compactions & flushes 2024-11-28T00:27:29,967 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:29,967 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:29,967 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. after waiting 0 ms 2024-11-28T00:27:29,967 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:29,967 INFO [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 
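[Editor's note] The create request and table descriptor recorded above (table testtb-testExportWithTargetName, a single MOB-enabled family 'cf' with MOB_THRESHOLD => '0', VERSIONS => '1', and regions split at '1') correspond roughly to the client-side call sketched below. This is a hedged illustration, not the test's actual code: only the table name, family name, MOB settings, and split key are taken from the log; the connection setup and class name are generic assumptions for an HBase 2.x+ client.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            TableDescriptor desc = TableDescriptorBuilder
                .newBuilder(TableName.valueOf("testtb-testExportWithTargetName"))
                .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                    .setMobEnabled(true)           // IS_MOB => 'true' in the logged descriptor
                    .setMobThreshold(0L)           // MOB_THRESHOLD => '0': every cell value is MOB-eligible
                    .setMaxVersions(1)             // VERSIONS => '1'
                    .setBloomFilterType(BloomType.ROW)
                    .build())
                .build();
            // One split key '1' yields the two regions seen in the log: ('', '1') and ('1', '').
            byte[][] splits = new byte[][] { Bytes.toBytes("1") };
            admin.createTable(desc, splits);
        }
    }
}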
2024-11-28T00:27:29,967 DEBUG [RegionOpenAndInit-testtb-testExportWithTargetName-pool-0 {}] regionserver.HRegion(1676): Region close journal for 07a29275a235142b029c2db26d7133da: Waiting for close lock at 1732753649966Disabling compacts and flushes for region at 1732753649966Disabling writes for close at 1732753649967 (+1 ms)Writing region close event to WAL at 1732753649967Closed at 1732753649967 2024-11-28T00:27:29,968 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:27:29,969 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732753649968"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753649968"}]},"ts":"1732753649968"} 2024-11-28T00:27:29,969 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.","families":{"info":[{"qualifier":"regioninfo","vlen":66,"tag":[],"timestamp":"1732753649968"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753649968"}]},"ts":"1732753649968"} 2024-11-28T00:27:29,972 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:27:29,973 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:27:29,974 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753649973"}]},"ts":"1732753649973"} 2024-11-28T00:27:29,977 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLING in hbase:meta 2024-11-28T00:27:29,977 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:27:29,979 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:27:29,980 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:27:29,980 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:27:29,980 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:27:29,980 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:27:29,980 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:27:29,980 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:27:29,980 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:27:29,980 INFO [PEWorker-4 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:27:29,980 DEBUG [PEWorker-4 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:27:29,980 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, ASSIGN}, {pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, ASSIGN}] 2024-11-28T00:27:29,982 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, ASSIGN 2024-11-28T00:27:29,983 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, ASSIGN 2024-11-28T00:27:29,984 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:27:29,985 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:27:30,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-28T00:27:30,135 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
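[Editor's note] The entries above show CreateTableProcedure (pid=45) adding both regions to hbase:meta and then spawning one TransitRegionStateProcedure per region for assignment, while the RPC handler repeatedly logs "Checking to see if procedure is done pid=45" on behalf of the waiting client. From the client side, the equivalent wait-and-verify step could look roughly like the sketch below; this is an assumption-laden illustration (the Connection and Admin are assumed to come from the previous sketch), not the test's own code.

import java.util.List;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.util.Bytes;

public class VerifyAssignmentSketch {
    // Waits until the new table is available, then prints where each region landed,
    // mirroring the OPENING -> OPENED transitions recorded by the AssignmentManager above.
    static void verify(Connection conn, Admin admin) throws Exception {
        TableName tn = TableName.valueOf("testtb-testExportWithTargetName");
        while (!admin.isTableAvailable(tn)) {   // client-side analogue of the "is procedure done" polling
            Thread.sleep(100);
        }
        try (RegionLocator locator = conn.getRegionLocator(tn)) {
            List<HRegionLocation> locations = locator.getAllRegionLocations();
            for (HRegionLocation loc : locations) {
                System.out.println(loc.getRegion().getEncodedName()
                    + " [" + Bytes.toStringBinary(loc.getRegion().getStartKey())
                    + "," + Bytes.toStringBinary(loc.getRegion().getEndKey())
                    + ") -> " + loc.getServerName());
            }
        }
    }
}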
2024-11-28T00:27:30,136 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=07a29275a235142b029c2db26d7133da, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:27:30,136 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=eb1846f897c221e883336771cdcfcd5a, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:27:30,139 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=47, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, ASSIGN because future has completed 2024-11-28T00:27:30,139 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb1846f897c221e883336771cdcfcd5a, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:27:30,140 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=46, ppid=45, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, ASSIGN because future has completed 2024-11-28T00:27:30,141 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 07a29275a235142b029c2db26d7133da, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:27:30,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-28T00:27:30,298 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:30,298 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7752): Opening region: {ENCODED => eb1846f897c221e883336771cdcfcd5a, NAME => 'testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:27:30,299 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. service=AccessControlService 2024-11-28T00:27:30,300 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
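[Editor's note] Once the OpenRegionProcedure entries below complete, both regions are online on 72c8c7f04009 with the AccessController coprocessor attached. Because the family was created with MOB_THRESHOLD => '0', any value written to 'cf' is stored as a MOB cell on flush, with only a reference cell left in the regular store file. Seeding data for the later export could therefore look like the short sketch below; the row key, qualifier, and value are invented purely for illustration, and only the table and family names come from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class SeedMobDataSketch {
    // Writes one cell into the MOB-enabled family; with a MOB threshold of 0,
    // the value goes to a MOB file at flush time rather than staying in the regular HFile.
    static void seed(Connection conn) throws Exception {
        try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
            Put put = new Put(Bytes.toBytes("row-0"));                                            // hypothetical row key
            put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));     // hypothetical qualifier/value
            table.put(put);
        }
    }
}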
2024-11-28T00:27:30,300 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43007, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:27:30,300 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,300 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:30,300 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7794): checking encryption for eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,300 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(7797): checking classloading for eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,302 INFO [StoreOpener-eb1846f897c221e883336771cdcfcd5a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,305 INFO [StoreOpener-eb1846f897c221e883336771cdcfcd5a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region eb1846f897c221e883336771cdcfcd5a columnFamilyName cf 2024-11-28T00:27:30,306 DEBUG [StoreOpener-eb1846f897c221e883336771cdcfcd5a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:30,308 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(132): Open testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:30,308 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7752): Opening region: {ENCODED => 07a29275a235142b029c2db26d7133da, NAME => 'testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:27:30,308 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 
service=AccessControlService 2024-11-28T00:27:30,309 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:27:30,309 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithTargetName 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,309 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(898): Instantiated testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:30,309 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7794): checking encryption for 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,309 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(7797): checking classloading for 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,314 INFO [StoreOpener-eb1846f897c221e883336771cdcfcd5a-1 {}] regionserver.HStore(327): Store=eb1846f897c221e883336771cdcfcd5a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:27:30,314 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1038): replaying wal for eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,315 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,316 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,317 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1048): stopping wal replay for eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,317 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1060): Cleaning up temporary data for eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,320 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1093): writing seq id for eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,333 INFO [StoreOpener-07a29275a235142b029c2db26d7133da-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,335 INFO [StoreOpener-07a29275a235142b029c2db26d7133da-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 07a29275a235142b029c2db26d7133da columnFamilyName cf 2024-11-28T00:27:30,336 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:27:30,336 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1114): Opened eb1846f897c221e883336771cdcfcd5a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59797824, jitterRate=-0.10894298553466797}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:27:30,337 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1122): Running coprocessor post-open hooks for eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,337 DEBUG [StoreOpener-07a29275a235142b029c2db26d7133da-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:30,338 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegion(1006): Region open journal for eb1846f897c221e883336771cdcfcd5a: Running coprocessor pre-open hook at 1732753650301Writing region info on filesystem at 1732753650301Initializing all the Stores at 1732753650302 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753650302Cleaning up temporary data from old regions at 1732753650317 (+15 ms)Running coprocessor post-open hooks at 1732753650337 (+20 ms)Region opened successfully at 1732753650338 (+1 ms) 2024-11-28T00:27:30,338 INFO [StoreOpener-07a29275a235142b029c2db26d7133da-1 {}] regionserver.HStore(327): Store=07a29275a235142b029c2db26d7133da/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:27:30,339 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1038): replaying wal for 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,339 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2236): Post open deploy 
tasks for testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a., pid=48, masterSystemTime=1732753650294 2024-11-28T00:27:30,340 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,342 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,342 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:30,342 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=48}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:30,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1048): stopping wal replay for 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1060): Cleaning up temporary data for 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,343 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=47 updating hbase:meta row=eb1846f897c221e883336771cdcfcd5a, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:27:30,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=48, ppid=47, state=RUNNABLE, hasLock=false; OpenRegionProcedure eb1846f897c221e883336771cdcfcd5a, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:27:30,350 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1093): writing seq id for 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=48, resume processing ppid=47 2024-11-28T00:27:30,351 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=48, ppid=47, state=SUCCESS, hasLock=false; OpenRegionProcedure eb1846f897c221e883336771cdcfcd5a, server=72c8c7f04009,43335,1732753506004 in 209 msec 2024-11-28T00:27:30,353 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=47, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, ASSIGN in 371 msec 2024-11-28T00:27:30,354 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:27:30,354 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 
{event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1114): Opened 07a29275a235142b029c2db26d7133da; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=63487620, jitterRate=-0.05396074056625366}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:27:30,355 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,355 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegion(1006): Region open journal for 07a29275a235142b029c2db26d7133da: Running coprocessor pre-open hook at 1732753650309Writing region info on filesystem at 1732753650310 (+1 ms)Initializing all the Stores at 1732753650317 (+7 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753650317Cleaning up temporary data from old regions at 1732753650343 (+26 ms)Running coprocessor post-open hooks at 1732753650355 (+12 ms)Region opened successfully at 1732753650355 2024-11-28T00:27:30,359 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da., pid=49, masterSystemTime=1732753650299 2024-11-28T00:27:30,362 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:30,362 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=49}] handler.AssignRegionHandler(153): Opened testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 
2024-11-28T00:27:30,363 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=46 updating hbase:meta row=07a29275a235142b029c2db26d7133da, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:27:30,365 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=49, ppid=46, state=RUNNABLE, hasLock=false; OpenRegionProcedure 07a29275a235142b029c2db26d7133da, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:27:30,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=49, resume processing ppid=46 2024-11-28T00:27:30,370 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=49, ppid=46, state=SUCCESS, hasLock=false; OpenRegionProcedure 07a29275a235142b029c2db26d7133da, server=72c8c7f04009,41621,1732753506207 in 226 msec 2024-11-28T00:27:30,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=46, resume processing ppid=45 2024-11-28T00:27:30,373 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=46, ppid=45, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, ASSIGN in 390 msec 2024-11-28T00:27:30,375 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:27:30,375 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753650375"}]},"ts":"1732753650375"} 2024-11-28T00:27:30,378 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=ENABLED in hbase:meta 2024-11-28T00:27:30,379 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=45, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithTargetName execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:27:30,379 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithTargetName jenkins: RWXCA 2024-11-28T00:27:30,384 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-28T00:27:30,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:30,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:30,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:30,387 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:30,390 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:30,390 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:30,391 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:30,392 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=45, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithTargetName in 453 msec 2024-11-28T00:27:30,392 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF\x0AE\x0A\x07jenkins\x12:\x08\x03"6\x0A*\x0A\x07default\x12\x1Ftesttb-testExportWithTargetName \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:30,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=45 2024-11-28T00:27:30,572 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:27:30,573 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:27:30,575 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48394, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:30,582 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-28T00:27:30,582 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 
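[Editor's note] The entries above record CreateTableProcedure pid=45 completing for testtb-testExportWithTargetName: two regions (empty start key to '1', and '1' to empty end key), a single column family 'cf' with IS_MOB => 'true', and the table owner's ACL written and propagated through ZooKeeper. For orientation, a minimal client-side sketch of creating such a pre-split table with the HBase Admin API follows. The table name, family name, MOB settings, and split key are taken from the log; the configuration setup and class wrapper are illustrative assumptions, not the test's actual code.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public final class CreateExportTestTable {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();   // reads hbase-site.xml from the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      // One family "cf"; the log's family descriptor shows IS_MOB => 'true', MOB_THRESHOLD => '0'.
      TableDescriptorBuilder desc = TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)
              .setMobThreshold(0)
              .build());
      // A single split key "1" yields the two regions ('' -> '1' and '1' -> '') opened in the log.
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(desc.build(), splitKeys);
    }
  }
}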
2024-11-28T00:27:30,583 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:27:30,586 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:27:30,593 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:27:30,598 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:30,611 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43934, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:30,618 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46240, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:30,620 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:27:30,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-28T00:27:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753650626 (current time:1732753650626). 
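[Editor's note] The last entries above show the master receiving the snapshot request { ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }. On the client side such a request is normally issued through the Admin snapshot API, roughly as sketched here; this is a hedged illustration and the test utility may use a different overload. type=FLUSH maps to SnapshotType.FLUSH, and ttl=0 is simply the default (no TTL).

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.SnapshotType;

public final class TakeEmptySnapshot {
  // Assumes an open Admin handle, e.g. from ConnectionFactory.createConnection(conf).getAdmin().
  static void run(Admin admin) throws Exception {
    admin.snapshot("emptySnaptb0-testExportWithTargetName",
        TableName.valueOf("testtb-testExportWithTargetName"),
        SnapshotType.FLUSH);
  }
}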
2024-11-28T00:27:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:27:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-28T00:27:30,626 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:27:30,627 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@678bc41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:30,628 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:30,629 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:30,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:30,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:30,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@279407c1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:30,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:30,630 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:30,631 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:30,631 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32848, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:30,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30ebb224, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:30,633 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:30,634 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:30,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:30,636 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48408, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:30,638 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:27:30,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:30,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:30,638 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:30,639 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:27:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@605529d1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:30,642 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:30,643 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:30,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:30,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:30,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@790780e9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:30,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:30,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:30,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:30,645 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:30,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e3631de, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:30,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:30,648 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:30,649 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:30,650 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48416, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:27:30,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:27:30,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:30,655 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43944, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:30,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:27:30,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at 
org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:30,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:30,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:30,657 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:30,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-28T00:27:30,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:27:30,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-28T00:27:30,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-28T00:27:30,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-28T00:27:30,664 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:27:30,666 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:27:30,670 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:27:30,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741914_1090 (size=167) 2024-11-28T00:27:30,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741914_1090 (size=167) 2024-11-28T00:27:30,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741914_1090 (size=167) 2024-11-28T00:27:30,696 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:27:30,696 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 07a29275a235142b029c2db26d7133da}, {pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb1846f897c221e883336771cdcfcd5a}] 2024-11-28T00:27:30,698 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,699 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-28T00:27:30,851 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=52 2024-11-28T00:27:30,852 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=51 2024-11-28T00:27:30,852 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:30,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.HRegion(2603): Flush status journal for eb1846f897c221e883336771cdcfcd5a: 2024-11-28T00:27:30,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. for emptySnaptb0-testExportWithTargetName completed. 2024-11-28T00:27:30,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-28T00:27:30,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:30,853 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:27:30,854 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 
2024-11-28T00:27:30,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.HRegion(2603): Flush status journal for 07a29275a235142b029c2db26d7133da: 2024-11-28T00:27:30,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. for emptySnaptb0-testExportWithTargetName completed. 2024-11-28T00:27:30,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.' region-info for snapshot=emptySnaptb0-testExportWithTargetName 2024-11-28T00:27:30,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:30,855 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:27:30,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741915_1091 (size=70) 2024-11-28T00:27:30,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741915_1091 (size=70) 2024-11-28T00:27:30,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741915_1091 (size=70) 2024-11-28T00:27:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 
2024-11-28T00:27:30,892 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=52}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=52 2024-11-28T00:27:30,893 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=52 2024-11-28T00:27:30,893 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,893 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=52, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:30,898 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=52, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure eb1846f897c221e883336771cdcfcd5a in 200 msec 2024-11-28T00:27:30,905 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741916_1092 (size=70) 2024-11-28T00:27:30,906 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741916_1092 (size=70) 2024-11-28T00:27:30,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741916_1092 (size=70) 2024-11-28T00:27:30,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 
2024-11-28T00:27:30,908 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=51}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=51 2024-11-28T00:27:30,909 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=51 2024-11-28T00:27:30,909 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithTargetName on region 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,909 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=51, ppid=50, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:30,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=51, resume processing ppid=50 2024-11-28T00:27:30,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=51, ppid=50, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 07a29275a235142b029c2db26d7133da in 214 msec 2024-11-28T00:27:30,917 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:27:30,918 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:27:30,920 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:27:30,920 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:27:30,921 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:30,924 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:27:30,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-28T00:27:30,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741917_1093 (size=62) 2024-11-28T00:27:30,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741917_1093 (size=62) 2024-11-28T00:27:30,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741917_1093 (size=62) 2024-11-28T00:27:31,000 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:27:31,000 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithTargetName 2024-11-28T00:27:31,001 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName 2024-11-28T00:27:31,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741918_1094 (size=649) 2024-11-28T00:27:31,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741918_1094 (size=649) 2024-11-28T00:27:31,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741918_1094 (size=649) 2024-11-28T00:27:31,089 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:27:31,114 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:27:31,115 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithTargetName to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testExportWithTargetName 2024-11-28T00:27:31,118 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=50, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:27:31,118 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 50 2024-11-28T00:27:31,120 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=50, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=50, snapshot={ ss=emptySnaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 459 msec 2024-11-28T00:27:31,291 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=50 2024-11-28T00:27:31,292 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:27:31,302 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41621 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:27:31,304 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:27:31,308 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:27:31,312 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithTargetName 2024-11-28T00:27:31,312 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 
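[Editor's note] The region servers above warn "writing data to region ... with WAL disabled. Data may be lost in the event of a crash." That warning is produced when mutations arrive with durability SKIP_WAL, which the test uses to load rows quickly before the second snapshot. A small sketch of such a write is below; the row key, qualifier, and value are hypothetical placeholders, only the table and family names come from the log.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public final class LoadRowsWithoutWal {
  static void run(Connection conn) throws Exception {
    try (Table table = conn.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
      Put put = new Put(Bytes.toBytes("0"))   // hypothetical row key
          .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      // SKIP_WAL is what triggers the "with WAL disabled" warning seen in the log.
      put.setDurability(Durability.SKIP_WAL);
      table.put(put);
    }
  }
}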
2024-11-28T00:27:31,312 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:27:31,314 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:27:31,322 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:27:31,328 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithTargetName,, stopping at row=testtb-testExportWithTargetName ,, for max=2147483647 with caching=100 2024-11-28T00:27:31,333 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-28T00:27:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753651333 (current time:1732753651333). 2024-11-28T00:27:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:27:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithTargetName VERSION not specified, setting to 2 2024-11-28T00:27:31,333 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:27:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@535b14b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:31,334 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:31,335 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:31,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:31,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:31,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75122d6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind 
address=null 2024-11-28T00:27:31,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:31,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:31,336 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:31,337 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32902, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:31,338 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f5dd7cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:31,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:31,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:31,340 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:31,341 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48418, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:31,343 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:27:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:31,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:31,343 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1e22a818, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:31,344 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:31,345 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:31,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:31,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:31,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b8f3d40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:31,345 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:31,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:31,346 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:31,347 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32924, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:31,347 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39db49ba, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:31,348 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:31,349 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:31,350 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:31,351 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48430, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:31,353 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithTargetName', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:27:31,354 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:31,355 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43950, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:31,356 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
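The entries above record a fresh client connection bootstrapping itself: it asks the connection registry for the cluster id, locates hbase:meta, and then opens ClientService connections to the region servers. For orientation, a minimal sketch (assuming default configuration; the table name is taken from this test) of the client-side call that produces that handshake:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Table;

    public class ConnectionBootstrapSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Creating the connection triggers the sequence seen in the log:
        // fetch the cluster id from the connection registry, then look up hbase:meta.
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Table table = connection.getTable(TableName.valueOf("testtb-testExportWithTargetName"))) {
          // Region locations (hbase:acl, user table regions, ...) are resolved
          // lazily and cached per connection.
          System.out.println(table.getName());
        }
      }
    }
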
2024-11-28T00:27:31,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:31,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:31,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:31,357 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:31,357 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithTargetName], kv [jenkins: RWXCA] 2024-11-28T00:27:31,358 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
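The entries below show the master registering and executing SnapshotProcedure pid=53 for a FLUSH-type snapshot of testtb-testExportWithTargetName. A minimal sketch, assuming the standard client Admin API, of the call that initiates such a snapshot:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // For an enabled table this takes a flush-type snapshot; the master
          // stores a SnapshotProcedure and the client polls until it is done,
          // which is the "Checking to see if procedure is done" loop in the log.
          admin.snapshot("snaptb0-testExportWithTargetName",
              TableName.valueOf("testtb-testExportWithTargetName"));
        }
      }
    }
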
2024-11-28T00:27:31,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } 2024-11-28T00:27:31,359 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-28T00:27:31,360 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-28T00:27:31,360 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:27:31,362 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:27:31,365 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:27:31,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741919_1095 (size=162) 2024-11-28T00:27:31,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741919_1095 (size=162) 2024-11-28T00:27:31,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741919_1095 (size=162) 2024-11-28T00:27:31,384 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:27:31,385 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 07a29275a235142b029c2db26d7133da}, {pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb1846f897c221e883336771cdcfcd5a}] 2024-11-28T00:27:31,386 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:31,386 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:31,471 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-28T00:27:31,538 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=54 2024-11-28T00:27:31,539 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=55 2024-11-28T00:27:31,539 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:31,540 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2902): Flushing eb1846f897c221e883336771cdcfcd5a 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-28T00:27:31,540 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:31,540 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2902): Flushing 07a29275a235142b029c2db26d7133da 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-28T00:27:31,592 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411288ab42332b6e1429bb5781ab30598e6a1_eb1846f897c221e883336771cdcfcd5a is 71, key is 161663b8f2ca5e2162be7d4ba56c4cde/cf:q/1732753651304/Put/seqid=0 2024-11-28T00:27:31,594 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280c7819ce238b4beb8a009046752ebd1a_07a29275a235142b029c2db26d7133da is 71, key is 003ee889237da71eb44d8b5447c4590b/cf:q/1732753651302/Put/seqid=0 2024-11-28T00:27:31,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741920_1096 (size=8101) 2024-11-28T00:27:31,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741920_1096 (size=8101) 2024-11-28T00:27:31,621 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741920_1096 (size=8101) 2024-11-28T00:27:31,622 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:31,630 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HMobStore(268): FLUSH 
Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411288ab42332b6e1429bb5781ab30598e6a1_eb1846f897c221e883336771cdcfcd5a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411288ab42332b6e1429bb5781ab30598e6a1_eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:31,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/.tmp/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b, store: [table=testtb-testExportWithTargetName family=cf region=eb1846f897c221e883336771cdcfcd5a] 2024-11-28T00:27:31,632 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/.tmp/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b is 208, key is 18f05ebfd6db9fb48e1fcb20a06fe26b6/cf:q/1732753651304/Put/seqid=0 2024-11-28T00:27:31,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741921_1097 (size=5171) 2024-11-28T00:27:31,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741921_1097 (size=5171) 2024-11-28T00:27:31,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741921_1097 (size=5171) 2024-11-28T00:27:31,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741922_1098 (size=14745) 2024-11-28T00:27:31,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741922_1098 (size=14745) 2024-11-28T00:27:31,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741922_1098 (size=14745) 2024-11-28T00:27:31,672 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/.tmp/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b 2024-11-28T00:27:31,681 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-28T00:27:31,682 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/.tmp/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b 2024-11-28T00:27:31,687 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280c7819ce238b4beb8a009046752ebd1a_07a29275a235142b029c2db26d7133da to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411280c7819ce238b4beb8a009046752ebd1a_07a29275a235142b029c2db26d7133da 2024-11-28T00:27:31,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/.tmp/cf/3595e9df409a43d48105742754f24b72, store: [table=testtb-testExportWithTargetName family=cf region=07a29275a235142b029c2db26d7133da] 2024-11-28T00:27:31,690 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/.tmp/cf/3595e9df409a43d48105742754f24b72 is 208, key is 09da08b7257a19b7d30393462a1b4037f/cf:q/1732753651302/Put/seqid=0 2024-11-28T00:27:31,693 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b, entries=46, sequenceid=6, filesize=14.4 K 2024-11-28T00:27:31,694 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for eb1846f897c221e883336771cdcfcd5a in 155ms, sequenceid=6, compaction requested=false 2024-11-28T00:27:31,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithTargetName' 2024-11-28T00:27:31,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.HRegion(2603): Flush status journal for eb1846f897c221e883336771cdcfcd5a: 2024-11-28T00:27:31,695 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 
for snaptb0-testExportWithTargetName completed. 2024-11-28T00:27:31,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.' region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-28T00:27:31,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:31,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b] hfiles 2024-11-28T00:27:31,696 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b for snapshot=snaptb0-testExportWithTargetName 2024-11-28T00:27:31,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741923_1099 (size=6116) 2024-11-28T00:27:31,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741923_1099 (size=6116) 2024-11-28T00:27:31,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741923_1099 (size=6116) 2024-11-28T00:27:31,707 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/.tmp/cf/3595e9df409a43d48105742754f24b72 2024-11-28T00:27:31,714 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/.tmp/cf/3595e9df409a43d48105742754f24b72 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/cf/3595e9df409a43d48105742754f24b72 2024-11-28T00:27:31,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741924_1100 (size=109) 2024-11-28T00:27:31,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 
2024-11-28T00:27:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741924_1100 (size=109) 2024-11-28T00:27:31,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=55}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=55 2024-11-28T00:27:31,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741924_1100 (size=109) 2024-11-28T00:27:31,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=55 2024-11-28T00:27:31,721 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:31,721 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=55, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:31,722 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/cf/3595e9df409a43d48105742754f24b72, entries=4, sequenceid=6, filesize=6.0 K 2024-11-28T00:27:31,723 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 07a29275a235142b029c2db26d7133da in 183ms, sequenceid=6, compaction requested=false 2024-11-28T00:27:31,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.HRegion(2603): Flush status journal for 07a29275a235142b029c2db26d7133da: 2024-11-28T00:27:31,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. for snaptb0-testExportWithTargetName completed. 2024-11-28T00:27:31,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.' 
region-info for snapshot=snaptb0-testExportWithTargetName 2024-11-28T00:27:31,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:31,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/cf/3595e9df409a43d48105742754f24b72] hfiles 2024-11-28T00:27:31,723 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/cf/3595e9df409a43d48105742754f24b72 for snapshot=snaptb0-testExportWithTargetName 2024-11-28T00:27:31,724 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=55, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure eb1846f897c221e883336771cdcfcd5a in 338 msec 2024-11-28T00:27:31,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741925_1101 (size=109) 2024-11-28T00:27:31,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741925_1101 (size=109) 2024-11-28T00:27:31,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741925_1101 (size=109) 2024-11-28T00:27:31,735 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 
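The flushes above went through DefaultMobStoreFlusher and wrote files under mobdir/..., which indicates the table's 'cf' family has MOB (medium object) storage enabled. A minimal sketch, assuming a hypothetical table name and threshold, of declaring such a family:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MobTableSketch {
      // Builds a descriptor whose 'cf' family stores large cells as MOB files,
      // which is what produces the mobdir/... flush paths seen in this log.
      static void createMobTable(Admin admin) throws java.io.IOException {
        TableDescriptorBuilder table =
            TableDescriptorBuilder.newBuilder(TableName.valueOf("testtb-mob-example"));  // hypothetical name
        table.setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)        // cells above the threshold are written as MOB files
            .setMobThreshold(102400L)   // hypothetical 100 KB threshold
            .build());
        admin.createTable(table.build());
      }
    }
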
2024-11-28T00:27:31,735 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=54}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=54 2024-11-28T00:27:31,736 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=54 2024-11-28T00:27:31,736 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithTargetName on region 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:31,736 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=54, ppid=53, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:31,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=54, resume processing ppid=53 2024-11-28T00:27:31,744 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=54, ppid=53, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 07a29275a235142b029c2db26d7133da in 354 msec 2024-11-28T00:27:31,744 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:27:31,745 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:27:31,747 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:27:31,747 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:27:31,747 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:31,749 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411288ab42332b6e1429bb5781ab30598e6a1_eb1846f897c221e883336771cdcfcd5a, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411280c7819ce238b4beb8a009046752ebd1a_07a29275a235142b029c2db26d7133da] hfiles 2024-11-28T00:27:31,749 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411288ab42332b6e1429bb5781ab30598e6a1_eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:31,749 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411280c7819ce238b4beb8a009046752ebd1a_07a29275a235142b029c2db26d7133da 2024-11-28T00:27:31,757 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741926_1102 (size=293) 2024-11-28T00:27:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741926_1102 (size=293) 2024-11-28T00:27:31,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741926_1102 (size=293) 2024-11-28T00:27:31,759 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:27:31,759 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithTargetName 2024-11-28T00:27:31,760 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName 2024-11-28T00:27:31,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741927_1103 (size=959) 2024-11-28T00:27:31,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741927_1103 (size=959) 2024-11-28T00:27:31,774 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741927_1103 (size=959) 2024-11-28T00:27:31,778 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:27:31,786 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:27:31,787 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportWithTargetName to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-28T00:27:31,788 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=53, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:27:31,789 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 }, snapshot procedure id = 53 2024-11-28T00:27:31,790 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=53, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=53, snapshot={ ss=snaptb0-testExportWithTargetName table=testtb-testExportWithTargetName type=FLUSH ttl=0 } in 431 msec 2024-11-28T00:27:31,992 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=53 2024-11-28T00:27:31,992 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:27:31,992 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992 2024-11-28T00:27:31,993 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:27:32,061 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:27:32,061 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-28T00:27:32,064 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:27:32,085 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithTargetName to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992/.hbase-snapshot/.tmp/testExportWithTargetName 2024-11-28T00:27:32,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741929_1105 (size=959) 2024-11-28T00:27:32,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741929_1105 (size=959) 2024-11-28T00:27:32,123 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741929_1105 (size=959) 2024-11-28T00:27:32,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741928_1104 (size=162) 2024-11-28T00:27:32,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741928_1104 (size=162) 2024-11-28T00:27:32,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741928_1104 (size=162) 2024-11-28T00:27:32,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741930_1106 (size=154) 2024-11-28T00:27:32,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741930_1106 (size=154) 2024-11-28T00:27:32,144 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741930_1106 (size=154) 2024-11-28T00:27:32,145 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:32,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:32,146 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,416 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-7138118113939119931.jar 2024-11-28T00:27:33,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,417 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,504 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-630651428720999791.jar 2024-11-28T00:27:33,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,505 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,506 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:27:33,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:27:33,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:27:33,507 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:27:33,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:27:33,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:27:33,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:27:33,508 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:27:33,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:27:33,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:27:33,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:27:33,509 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:27:33,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:27:33,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:27:33,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
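The TableMapReduceUtil(972) entries above and below show the export job resolving, for each class its map tasks will need, the jar that contains that class so the jar can be shipped with the MapReduce job. A minimal sketch, assuming a freshly configured Job, of using the same mechanism directly:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
    import org.apache.hadoop.mapreduce.Job;

    public class DependencyJarsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        Job job = Job.getInstance(conf, "export-snapshot-sketch");
        // Resolves the containing jar for the HBase/Hadoop classes the tasks need
        // and adds each jar to the job's classpath; the "For class X, using jar Y"
        // lines in this log come from that resolution.
        TableMapReduceUtil.addDependencyJars(job);
      }
    }
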
2024-11-28T00:27:33,510 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:27:33,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:27:33,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:27:33,511 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:27:33,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741931_1107 (size=131440) 2024-11-28T00:27:33,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741931_1107 (size=131440) 2024-11-28T00:27:33,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741931_1107 (size=131440) 2024-11-28T00:27:33,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741932_1108 (size=4188619) 2024-11-28T00:27:33,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741932_1108 (size=4188619) 2024-11-28T00:27:33,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741932_1108 (size=4188619) 2024-11-28T00:27:33,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741933_1109 (size=1323991) 2024-11-28T00:27:33,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741933_1109 (size=1323991) 2024-11-28T00:27:33,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741933_1109 (size=1323991) 2024-11-28T00:27:33,688 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0001_000001 (auth:SIMPLE) from 127.0.0.1:58172 2024-11-28T00:27:33,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0001/container_1732753513471_0001_01_000001/launch_container.sh] 2024-11-28T00:27:33,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): 
delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0001/container_1732753513471_0001_01_000001/container_tokens] 2024-11-28T00:27:33,700 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0001/container_1732753513471_0001_01_000001/sysfs] 2024-11-28T00:27:33,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741934_1110 (size=903927) 2024-11-28T00:27:33,708 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741934_1110 (size=903927) 2024-11-28T00:27:33,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741934_1110 (size=903927) 2024-11-28T00:27:33,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741935_1111 (size=8360083) 2024-11-28T00:27:33,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741935_1111 (size=8360083) 2024-11-28T00:27:33,768 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741935_1111 (size=8360083) 2024-11-28T00:27:33,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741936_1112 (size=1877034) 2024-11-28T00:27:33,814 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741936_1112 (size=1877034) 2024-11-28T00:27:33,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741936_1112 (size=1877034) 2024-11-28T00:27:33,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741937_1113 (size=77835) 2024-11-28T00:27:33,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741937_1113 (size=77835) 2024-11-28T00:27:33,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741937_1113 (size=77835) 2024-11-28T00:27:33,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741938_1114 (size=30949) 2024-11-28T00:27:33,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741938_1114 (size=30949) 2024-11-28T00:27:33,883 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741938_1114 (size=30949) 2024-11-28T00:27:33,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to 
blk_1073741939_1115 (size=1597213) 2024-11-28T00:27:33,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741939_1115 (size=1597213) 2024-11-28T00:27:33,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741939_1115 (size=1597213) 2024-11-28T00:27:33,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:27:33,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741940_1116 (size=440957) 2024-11-28T00:27:33,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741940_1116 (size=440957) 2024-11-28T00:27:33,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741940_1116 (size=440957) 2024-11-28T00:27:34,009 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741941_1117 (size=4695811) 2024-11-28T00:27:34,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741941_1117 (size=4695811) 2024-11-28T00:27:34,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741941_1117 (size=4695811) 2024-11-28T00:27:34,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741942_1118 (size=232957) 2024-11-28T00:27:34,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741942_1118 (size=232957) 2024-11-28T00:27:34,032 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741942_1118 (size=232957) 2024-11-28T00:27:34,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741943_1119 (size=127628) 2024-11-28T00:27:34,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741943_1119 (size=127628) 2024-11-28T00:27:34,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741943_1119 (size=127628) 2024-11-28T00:27:34,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741944_1120 (size=6424743) 2024-11-28T00:27:34,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741944_1120 (size=6424743) 2024-11-28T00:27:34,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741944_1120 (size=6424743) 2024-11-28T00:27:34,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741945_1121 (size=20406) 2024-11-28T00:27:34,167 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741945_1121 (size=20406) 2024-11-28T00:27:34,167 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741945_1121 (size=20406) 2024-11-28T00:27:34,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741946_1122 (size=5175431) 2024-11-28T00:27:34,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741946_1122 (size=5175431) 2024-11-28T00:27:34,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741946_1122 (size=5175431) 2024-11-28T00:27:34,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741947_1123 (size=217634) 2024-11-28T00:27:34,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741947_1123 (size=217634) 2024-11-28T00:27:34,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741947_1123 (size=217634) 2024-11-28T00:27:34,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741948_1124 (size=1832290) 2024-11-28T00:27:34,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741948_1124 (size=1832290) 2024-11-28T00:27:34,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741948_1124 (size=1832290) 2024-11-28T00:27:34,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741949_1125 (size=322274) 2024-11-28T00:27:34,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741949_1125 (size=322274) 2024-11-28T00:27:34,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741949_1125 (size=322274) 2024-11-28T00:27:34,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741950_1126 (size=503880) 2024-11-28T00:27:34,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741950_1126 (size=503880) 2024-11-28T00:27:34,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741950_1126 (size=503880) 2024-11-28T00:27:34,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741951_1127 (size=29229) 2024-11-28T00:27:34,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741951_1127 (size=29229) 2024-11-28T00:27:34,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741951_1127 (size=29229) 2024-11-28T00:27:34,397 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741952_1128 (size=24096) 2024-11-28T00:27:34,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741952_1128 (size=24096) 2024-11-28T00:27:34,399 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741952_1128 (size=24096) 2024-11-28T00:27:34,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741953_1129 (size=111872) 2024-11-28T00:27:34,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741953_1129 (size=111872) 2024-11-28T00:27:34,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741953_1129 (size=111872) 2024-11-28T00:27:34,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741954_1130 (size=45609) 2024-11-28T00:27:34,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741954_1130 (size=45609) 2024-11-28T00:27:34,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741954_1130 (size=45609) 2024-11-28T00:27:34,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741955_1131 (size=136454) 2024-11-28T00:27:34,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741955_1131 (size=136454) 2024-11-28T00:27:34,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741955_1131 (size=136454) 2024-11-28T00:27:34,474 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
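[Editor's note] The "No job jar file set. User classes may not be found." warning from JobResourceUploader just above is harmless in these MiniMRCluster test runs, but in a standalone MapReduce driver it is normally silenced by telling the job which jar carries the user classes, as the message's reference to Job#setJar(String) suggests. A minimal sketch, assuming a placeholder driver class and jar path (neither is taken from this log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Job job = Job.getInstance(conf, "example-job");
    // Derive the job jar from a class that is packaged inside it...
    job.setJarByClass(JobJarSketch.class);
    // ...or set the jar path explicitly, the alternative the warning points at:
    // job.setJar("/path/to/example-job.jar");
    // (remaining job configuration and submission would follow as usual)
  }
}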
2024-11-28T00:27:34,477 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithTargetName' hfile list 2024-11-28T00:27:34,480 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.4 K 2024-11-28T00:27:34,480 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-11-28T00:27:34,480 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-11-28T00:27:34,480 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-28T00:27:34,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741956_1132 (size=1031) 2024-11-28T00:27:34,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741956_1132 (size=1031) 2024-11-28T00:27:34,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741956_1132 (size=1031) 2024-11-28T00:27:34,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741957_1133 (size=35) 2024-11-28T00:27:34,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741957_1133 (size=35) 2024-11-28T00:27:34,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741957_1133 (size=35) 2024-11-28T00:27:34,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741958_1134 (size=303997) 2024-11-28T00:27:34,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741958_1134 (size=303997) 2024-11-28T00:27:34,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741958_1134 (size=303997) 2024-11-28T00:27:34,613 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:27:34,613 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:27:34,670 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:27:34,692 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:58188 2024-11-28T00:27:35,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-28T00:27:35,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName Metrics about Tables on a single HBase RegionServer 2024-11-28T00:27:35,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:35,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSplitRegion 2024-11-28T00:27:41,047 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:27:42,569 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:40826 2024-11-28T00:27:43,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741959_1135 (size=349695) 2024-11-28T00:27:43,068 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741959_1135 (size=349695) 2024-11-28T00:27:43,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741959_1135 (size=349695) 2024-11-28T00:27:44,854 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:46674 2024-11-28T00:27:44,854 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:41254 2024-11-28T00:27:45,715 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:46688 2024-11-28T00:27:45,721 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:41264 2024-11-28T00:27:47,881 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0002_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:27:51,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741960_1136 (size=14745) 2024-11-28T00:27:51,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741960_1136 (size=14745) 
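[Editor's note] The export splits logged above and the "Finalize the Snapshot Export" / "Export Completed: testExportWithTargetName" messages that follow correspond to running the ExportSnapshot tool with a target name, which is what this test exercises. A rough sketch of an equivalent invocation, not the test's actual code; the -copy-to URI and mapper count are placeholders, and option spellings should be checked against ExportSnapshot's help output for the HBase version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportWithTargetNameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copy snapshot 'snaptb0-testExportWithTargetName' to another filesystem,
    // storing it there under the new name 'testExportWithTargetName'.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithTargetName",
        "-target", "testExportWithTargetName",
        "-copy-to", "hdfs://namenode:8020/export-test",   // placeholder destination
        "-mappers", "4"
    });
    System.exit(rc);
  }
}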
2024-11-28T00:27:51,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741960_1136 (size=14745) 2024-11-28T00:27:51,517 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000002/launch_container.sh] 2024-11-28T00:27:51,517 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000002/container_tokens] 2024-11-28T00:27:51,517 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000002/sysfs] 2024-11-28T00:27:53,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741962_1138 (size=6116) 2024-11-28T00:27:53,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741962_1138 (size=6116) 2024-11-28T00:27:53,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741962_1138 (size=6116) 2024-11-28T00:27:54,050 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000004/launch_container.sh] 2024-11-28T00:27:54,050 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000004/container_tokens] 2024-11-28T00:27:54,050 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000004/sysfs] 2024-11-28T00:27:54,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741963_1139 (size=5171) 2024-11-28T00:27:54,113 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to 
blk_1073741963_1139 (size=5171) 2024-11-28T00:27:54,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741963_1139 (size=5171) 2024-11-28T00:27:54,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741964_1140 (size=8101) 2024-11-28T00:27:54,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741964_1140 (size=8101) 2024-11-28T00:27:54,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741964_1140 (size=8101) 2024-11-28T00:27:54,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741961_1137 (size=31745) 2024-11-28T00:27:54,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741961_1137 (size=31745) 2024-11-28T00:27:54,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741961_1137 (size=31745) 2024-11-28T00:27:54,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741965_1141 (size=465) 2024-11-28T00:27:54,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741965_1141 (size=465) 2024-11-28T00:27:54,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741965_1141 (size=465) 2024-11-28T00:27:54,295 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000005/launch_container.sh] 2024-11-28T00:27:54,295 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000005/container_tokens] 2024-11-28T00:27:54,295 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000005/sysfs] 2024-11-28T00:27:54,313 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000003/launch_container.sh] 2024-11-28T00:27:54,313 WARN [ContainersLauncher #0 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000003/container_tokens] 2024-11-28T00:27:54,313 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000003/sysfs] 2024-11-28T00:27:54,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741966_1142 (size=31745) 2024-11-28T00:27:54,318 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741966_1142 (size=31745) 2024-11-28T00:27:54,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741966_1142 (size=31745) 2024-11-28T00:27:54,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741967_1143 (size=349695) 2024-11-28T00:27:54,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741967_1143 (size=349695) 2024-11-28T00:27:54,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741967_1143 (size=349695) 2024-11-28T00:27:54,363 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:55288 2024-11-28T00:27:54,372 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:36654 2024-11-28T00:27:54,382 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:55304 2024-11-28T00:27:55,904 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:27:55,906 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-28T00:27:55,913 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: testExportWithTargetName 2024-11-28T00:27:55,913 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:27:55,914 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:27:55,914 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithTargetName at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithTargetName 2024-11-28T00:27:55,915 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithTargetName/.snapshotinfo 2024-11-28T00:27:55,915 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithTargetName/data.manifest 2024-11-28T00:27:55,915 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992/.hbase-snapshot/testExportWithTargetName at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992/.hbase-snapshot/testExportWithTargetName 2024-11-28T00:27:55,915 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992/.hbase-snapshot/testExportWithTargetName/.snapshotinfo 2024-11-28T00:27:55,915 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753651992/.hbase-snapshot/testExportWithTargetName/data.manifest 2024-11-28T00:27:55,924 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithTargetName 2024-11-28T00:27:55,926 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=56, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:27:55,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-28T00:27:55,929 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753675929"}]},"ts":"1732753675929"} 2024-11-28T00:27:55,932 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLING in hbase:meta 2024-11-28T00:27:55,932 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithTargetName to state=DISABLING 2024-11-28T00:27:55,933 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized 
subprocedures=[{pid=57, ppid=56, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName}] 2024-11-28T00:27:55,936 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, UNASSIGN}, {pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, UNASSIGN}] 2024-11-28T00:27:55,937 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, UNASSIGN 2024-11-28T00:27:55,937 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, UNASSIGN 2024-11-28T00:27:55,938 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=07a29275a235142b029c2db26d7133da, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:27:55,939 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=eb1846f897c221e883336771cdcfcd5a, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:27:55,941 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=58, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, UNASSIGN because future has completed 2024-11-28T00:27:55,941 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:27:55,941 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 07a29275a235142b029c2db26d7133da, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:27:55,943 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=59, ppid=57, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, UNASSIGN because future has completed 2024-11-28T00:27:55,943 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:27:55,943 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure eb1846f897c221e883336771cdcfcd5a, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:27:56,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-28T00:27:56,095 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] 
handler.UnassignRegionHandler(122): Close 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:56,095 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:27:56,095 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1722): Closing 07a29275a235142b029c2db26d7133da, disabling compactions & flushes 2024-11-28T00:27:56,095 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:56,095 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:56,095 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. after waiting 0 ms 2024-11-28T00:27:56,095 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:56,097 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(122): Close eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:56,097 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:27:56,098 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1722): Closing eb1846f897c221e883336771cdcfcd5a, disabling compactions & flushes 2024-11-28T00:27:56,098 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1755): Closing region testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:56,098 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 2024-11-28T00:27:56,098 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. after waiting 0 ms 2024-11-28T00:27:56,098 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 
2024-11-28T00:27:56,107 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:27:56,108 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:27:56,108 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da. 2024-11-28T00:27:56,108 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] regionserver.HRegion(1676): Region close journal for 07a29275a235142b029c2db26d7133da: Waiting for close lock at 1732753676095Running coprocessor pre-close hooks at 1732753676095Disabling compacts and flushes for region at 1732753676095Disabling writes for close at 1732753676095Writing region close event to WAL at 1732753676096 (+1 ms)Running coprocessor post-close hooks at 1732753676107 (+11 ms)Closed at 1732753676108 (+1 ms) 2024-11-28T00:27:56,110 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=60}] handler.UnassignRegionHandler(157): Closed 07a29275a235142b029c2db26d7133da 2024-11-28T00:27:56,111 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=58 updating hbase:meta row=07a29275a235142b029c2db26d7133da, regionState=CLOSED 2024-11-28T00:27:56,113 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=60, ppid=58, state=RUNNABLE, hasLock=false; CloseRegionProcedure 07a29275a235142b029c2db26d7133da, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:27:56,114 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:27:56,115 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:27:56,115 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1973): Closed testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a. 
2024-11-28T00:27:56,115 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] regionserver.HRegion(1676): Region close journal for eb1846f897c221e883336771cdcfcd5a: Waiting for close lock at 1732753676098Running coprocessor pre-close hooks at 1732753676098Disabling compacts and flushes for region at 1732753676098Disabling writes for close at 1732753676098Writing region close event to WAL at 1732753676099 (+1 ms)Running coprocessor post-close hooks at 1732753676115 (+16 ms)Closed at 1732753676115 2024-11-28T00:27:56,116 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=60, resume processing ppid=58 2024-11-28T00:27:56,117 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=60, ppid=58, state=SUCCESS, hasLock=false; CloseRegionProcedure 07a29275a235142b029c2db26d7133da, server=72c8c7f04009,41621,1732753506207 in 173 msec 2024-11-28T00:27:56,118 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=61}] handler.UnassignRegionHandler(157): Closed eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:56,118 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=59 updating hbase:meta row=eb1846f897c221e883336771cdcfcd5a, regionState=CLOSED 2024-11-28T00:27:56,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=58, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=07a29275a235142b029c2db26d7133da, UNASSIGN in 182 msec 2024-11-28T00:27:56,121 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=61, ppid=59, state=RUNNABLE, hasLock=false; CloseRegionProcedure eb1846f897c221e883336771cdcfcd5a, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:27:56,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=61, resume processing ppid=59 2024-11-28T00:27:56,124 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=61, ppid=59, state=SUCCESS, hasLock=false; CloseRegionProcedure eb1846f897c221e883336771cdcfcd5a, server=72c8c7f04009,43335,1732753506004 in 179 msec 2024-11-28T00:27:56,126 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=59, resume processing ppid=57 2024-11-28T00:27:56,126 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=59, ppid=57, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithTargetName, region=eb1846f897c221e883336771cdcfcd5a, UNASSIGN in 189 msec 2024-11-28T00:27:56,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=57, resume processing ppid=56 2024-11-28T00:27:56,129 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=57, ppid=56, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithTargetName in 194 msec 2024-11-28T00:27:56,130 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753676130"}]},"ts":"1732753676130"} 2024-11-28T00:27:56,132 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithTargetName, state=DISABLED in hbase:meta 2024-11-28T00:27:56,132 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithTargetName to state=DISABLED 
2024-11-28T00:27:56,134 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=56, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithTargetName in 209 msec 2024-11-28T00:27:56,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=56 2024-11-28T00:27:56,241 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:27:56,242 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithTargetName 2024-11-28T00:27:56,243 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:27:56,244 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=62, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:27:56,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithTargetName 2024-11-28T00:27:56,245 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=62, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:27:56,248 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithTargetName 2024-11-28T00:27:56,249 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da 2024-11-28T00:27:56,249 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:56,251 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:27:56,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:27:56,252 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/recovered.edits] 2024-11-28T00:27:56,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, 
quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:27:56,252 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:27:56,252 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/recovered.edits] 2024-11-28T00:27:56,253 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-28T00:27:56,253 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-28T00:27:56,253 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-28T00:27:56,253 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithTargetName with data PBUF 2024-11-28T00:27:56,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:27:56,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:27:56,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:27:56,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:56,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:56,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:56,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithTargetName 2024-11-28T00:27:56,255 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:56,256 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-11-28T00:27:56,259 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/cf/4fab0ec7fe3e4b72bc48bf1dfe27c51b 2024-11-28T00:27:56,259 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/cf/3595e9df409a43d48105742754f24b72 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/cf/3595e9df409a43d48105742754f24b72 2024-11-28T00:27:56,262 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da/recovered.edits/9.seqid 2024-11-28T00:27:56,262 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a/recovered.edits/9.seqid 2024-11-28T00:27:56,263 DEBUG [HFileArchiver-6 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/07a29275a235142b029c2db26d7133da 2024-11-28T00:27:56,263 DEBUG [HFileArchiver-7 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithTargetName/eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:56,263 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithTargetName regions 2024-11-28T00:27:56,264 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-28T00:27:56,265 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf] 2024-11-28T00:27:56,268 
DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411288ab42332b6e1429bb5781ab30598e6a1_eb1846f897c221e883336771cdcfcd5a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/c4ca4238a0b923820dcc509a6f75849b202411288ab42332b6e1429bb5781ab30598e6a1_eb1846f897c221e883336771cdcfcd5a 2024-11-28T00:27:56,270 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411280c7819ce238b4beb8a009046752ebd1a_07a29275a235142b029c2db26d7133da to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71/cf/d41d8cd98f00b204e9800998ecf8427e202411280c7819ce238b4beb8a009046752ebd1a_07a29275a235142b029c2db26d7133da 2024-11-28T00:27:56,270 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithTargetName/90c8eeaaf01a24f585da11044309be71 2024-11-28T00:27:56,273 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=62, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:27:56,276 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithTargetName from hbase:meta 2024-11-28T00:27:56,279 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithTargetName' descriptor. 2024-11-28T00:27:56,280 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=62, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:27:56,280 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithTargetName' from region states. 
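[Editor's note] The DISABLE and DELETE table procedures in this stretch, and the snapshot deletions that appear a few entries below, are triggered by ordinary client requests (the log records them as "Client=jenkins//172.17.0.2 ..."). A minimal client-side sketch of that cleanup sequence, assuming default connection settings; only the table and snapshot names are taken from the log:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SnapshotTestCleanupSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithTargetName");
      admin.disableTable(table);   // corresponds to the DisableTableProcedure (pid=56) above
      admin.deleteTable(table);    // corresponds to the DeleteTableProcedure (pid=62) above
      // Snapshot deletions, recorded shortly after this point in the log:
      admin.deleteSnapshot("emptySnaptb0-testExportWithTargetName");
      admin.deleteSnapshot("snaptb0-testExportWithTargetName");
    }
  }
}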
2024-11-28T00:27:56,281 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753676280"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:56,281 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753676280"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:56,283 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:27:56,283 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 07a29275a235142b029c2db26d7133da, NAME => 'testtb-testExportWithTargetName,,1732753649936.07a29275a235142b029c2db26d7133da.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => eb1846f897c221e883336771cdcfcd5a, NAME => 'testtb-testExportWithTargetName,1,1732753649936.eb1846f897c221e883336771cdcfcd5a.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:27:56,283 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithTargetName' as deleted. 2024-11-28T00:27:56,283 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithTargetName","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753676283"}]},"ts":"9223372036854775807"} 2024-11-28T00:27:56,285 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithTargetName state from META 2024-11-28T00:27:56,286 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=62, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithTargetName 2024-11-28T00:27:56,288 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=62, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithTargetName in 44 msec 2024-11-28T00:27:56,361 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=62 2024-11-28T00:27:56,361 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithTargetName 2024-11-28T00:27:56,361 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithTargetName completed 2024-11-28T00:27:56,369 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithTargetName" type: DISABLED 2024-11-28T00:27:56,371 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithTargetName 2024-11-28T00:27:56,374 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithTargetName" type: DISABLED 2024-11-28T00:27:56,376 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithTargetName 2024-11-28T00:27:56,409 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: 
snapshot.TestMobSecureExportSnapshot#testExportWithTargetName Thread=788 (was 750) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:33086 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: ApplicationMasterLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 21832) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:39789 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:34967 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34967 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:39789 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:52850 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async disk worker #1 for volume /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1615850115_1 at /127.0.0.1:46288 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39789 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37297 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39789 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ContainersLauncher #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-2071 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:46304 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HFileArchiver-6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1615850115_1 at /127.0.0.1:52824 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38561 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool.commonPool-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) - Thread LEAK? -, OpenFileDescriptor=793 (was 765) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=614 (was 465) - SystemLoadAverage LEAK? -, ProcessCount=14 (was 19), AvailableMemoryMB=4047 (was 1172) - AvailableMemoryMB LEAK? - 2024-11-28T00:27:56,409 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-28T00:27:56,429 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=788, OpenFileDescriptor=793, MaxFileDescriptor=1048576, SystemLoadAverage=614, ProcessCount=15, AvailableMemoryMB=4044 2024-11-28T00:27:56,429 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-28T00:27:56,431 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:27:56,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:27:56,433 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:27:56,433 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithResetTtl" procId is: 63 2024-11-28T00:27:56,434 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-28T00:27:56,435 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 
2024-11-28T00:27:56,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741968_1144 (size=440) 2024-11-28T00:27:56,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741968_1144 (size=440) 2024-11-28T00:27:56,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741968_1144 (size=440) 2024-11-28T00:27:56,447 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 2bfdc119724c45a502c5d7a8528a53e7, NAME => 'testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:27:56,447 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 4670378fa01e0ebd58277d5c264c6def, NAME => 'testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:27:56,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741969_1145 (size=65) 2024-11-28T00:27:56,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741969_1145 (size=65) 2024-11-28T00:27:56,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741969_1145 (size=65) 2024-11-28T00:27:56,457 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 2bfdc119724c45a502c5d7a8528a53e7, disabling compactions & flushes 2024-11-28T00:27:56,458 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. after waiting 0 ms 2024-11-28T00:27:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:56,458 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:56,458 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 2bfdc119724c45a502c5d7a8528a53e7: Waiting for close lock at 1732753676458Disabling compacts and flushes for region at 1732753676458Disabling writes for close at 1732753676458Writing region close event to WAL at 1732753676458Closed at 1732753676458 2024-11-28T00:27:56,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741970_1146 (size=65) 2024-11-28T00:27:56,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741970_1146 (size=65) 2024-11-28T00:27:56,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741970_1146 (size=65) 2024-11-28T00:27:56,463 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:56,463 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing 4670378fa01e0ebd58277d5c264c6def, disabling compactions & flushes 2024-11-28T00:27:56,463 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:27:56,463 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:27:56,463 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. after waiting 0 ms 2024-11-28T00:27:56,463 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:27:56,463 INFO [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 
2024-11-28T00:27:56,463 DEBUG [RegionOpenAndInit-testtb-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for 4670378fa01e0ebd58277d5c264c6def: Waiting for close lock at 1732753676463Disabling compacts and flushes for region at 1732753676463Disabling writes for close at 1732753676463Writing region close event to WAL at 1732753676463Closed at 1732753676463 2024-11-28T00:27:56,465 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:27:56,465 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732753676465"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753676465"}]},"ts":"1732753676465"} 2024-11-28T00:27:56,465 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732753676465"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753676465"}]},"ts":"1732753676465"} 2024-11-28T00:27:56,469 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:27:56,470 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:27:56,470 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753676470"}]},"ts":"1732753676470"} 2024-11-28T00:27:56,473 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-28T00:27:56,473 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:27:56,475 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:27:56,475 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:27:56,475 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:27:56,475 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:27:56,475 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:27:56,475 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:27:56,475 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:27:56,475 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:27:56,475 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:27:56,475 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:27:56,475 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, ASSIGN}, {pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, ASSIGN}] 2024-11-28T00:27:56,477 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, ASSIGN 2024-11-28T00:27:56,477 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, ASSIGN 2024-11-28T00:27:56,478 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:27:56,479 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:27:56,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-28T00:27:56,629 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:27:56,629 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=2bfdc119724c45a502c5d7a8528a53e7, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:27:56,629 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=4670378fa01e0ebd58277d5c264c6def, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:27:56,633 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=65, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, ASSIGN because future has completed 2024-11-28T00:27:56,633 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4670378fa01e0ebd58277d5c264c6def, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:27:56,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=64, ppid=63, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, ASSIGN because future has completed 2024-11-28T00:27:56,636 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:27:56,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-28T00:27:56,793 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:27:56,793 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7752): Opening region: {ENCODED => 4670378fa01e0ebd58277d5c264c6def, NAME => 'testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:27:56,794 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. service=AccessControlService 2024-11-28T00:27:56,794 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:27:56,794 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,794 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:56,795 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7794): checking encryption for 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,795 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(7797): checking classloading for 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,796 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(132): Open testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:56,796 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7752): Opening region: {ENCODED => 2bfdc119724c45a502c5d7a8528a53e7, NAME => 'testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:27:56,797 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. service=AccessControlService 2024-11-28T00:27:56,797 INFO [StoreOpener-4670378fa01e0ebd58277d5c264c6def-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,797 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:27:56,797 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithResetTtl 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,797 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(898): Instantiated testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:56,797 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7794): checking encryption for 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,797 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(7797): checking classloading for 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,799 INFO [StoreOpener-2bfdc119724c45a502c5d7a8528a53e7-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,800 INFO [StoreOpener-4670378fa01e0ebd58277d5c264c6def-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 4670378fa01e0ebd58277d5c264c6def columnFamilyName cf 2024-11-28T00:27:56,801 DEBUG [StoreOpener-4670378fa01e0ebd58277d5c264c6def-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:56,802 INFO [StoreOpener-2bfdc119724c45a502c5d7a8528a53e7-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2bfdc119724c45a502c5d7a8528a53e7 columnFamilyName cf 2024-11-28T00:27:56,802 INFO [StoreOpener-4670378fa01e0ebd58277d5c264c6def-1 {}] regionserver.HStore(327): Store=4670378fa01e0ebd58277d5c264c6def/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:27:56,802 DEBUG 
[StoreOpener-2bfdc119724c45a502c5d7a8528a53e7-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:56,802 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1038): replaying wal for 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,803 INFO [StoreOpener-2bfdc119724c45a502c5d7a8528a53e7-1 {}] regionserver.HStore(327): Store=2bfdc119724c45a502c5d7a8528a53e7/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:27:56,803 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1038): replaying wal for 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,803 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,804 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,804 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1048): stopping wal replay for 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,804 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1060): Cleaning up temporary data for 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,805 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,805 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,806 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1048): stopping wal replay for 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,806 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1060): Cleaning up temporary data for 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,807 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1093): writing seq id for 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,808 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1093): writing seq id for 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,812 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 
{event_type=M_RS_OPEN_REGION, pid=66}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:27:56,813 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1114): Opened 4670378fa01e0ebd58277d5c264c6def; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71793931, jitterRate=0.06981293857097626}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:27:56,813 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:56,814 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegion(1006): Region open journal for 4670378fa01e0ebd58277d5c264c6def: Running coprocessor pre-open hook at 1732753676795Writing region info on filesystem at 1732753676795Initializing all the Stores at 1732753676796 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753676796Cleaning up temporary data from old regions at 1732753676804 (+8 ms)Running coprocessor post-open hooks at 1732753676813 (+9 ms)Region opened successfully at 1732753676814 (+1 ms) 2024-11-28T00:27:56,815 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def., pid=66, masterSystemTime=1732753676787 2024-11-28T00:27:56,817 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:27:56,818 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:27:56,818 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=66}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 
2024-11-28T00:27:56,818 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1114): Opened 2bfdc119724c45a502c5d7a8528a53e7; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59007331, jitterRate=-0.12072224915027618}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:27:56,818 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:56,818 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegion(1006): Region open journal for 2bfdc119724c45a502c5d7a8528a53e7: Running coprocessor pre-open hook at 1732753676797Writing region info on filesystem at 1732753676797Initializing all the Stores at 1732753676798 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753676798Cleaning up temporary data from old regions at 1732753676806 (+8 ms)Running coprocessor post-open hooks at 1732753676818 (+12 ms)Region opened successfully at 1732753676818 2024-11-28T00:27:56,822 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7., pid=67, masterSystemTime=1732753676790 2024-11-28T00:27:56,822 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=65 updating hbase:meta row=4670378fa01e0ebd58277d5c264c6def, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:27:56,825 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=66, ppid=65, state=RUNNABLE, hasLock=false; OpenRegionProcedure 4670378fa01e0ebd58277d5c264c6def, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:27:56,825 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:56,825 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=67}] handler.AssignRegionHandler(153): Opened testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 
2024-11-28T00:27:56,826 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=64 updating hbase:meta row=2bfdc119724c45a502c5d7a8528a53e7, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:27:56,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=67, ppid=64, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:27:56,830 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=66, resume processing ppid=65 2024-11-28T00:27:56,832 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=66, ppid=65, state=SUCCESS, hasLock=false; OpenRegionProcedure 4670378fa01e0ebd58277d5c264c6def, server=72c8c7f04009,41621,1732753506207 in 193 msec 2024-11-28T00:27:56,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=67, resume processing ppid=64 2024-11-28T00:27:56,833 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=67, ppid=64, state=SUCCESS, hasLock=false; OpenRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7, server=72c8c7f04009,43335,1732753506004 in 194 msec 2024-11-28T00:27:56,833 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=65, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, ASSIGN in 356 msec 2024-11-28T00:27:56,836 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=64, resume processing ppid=63 2024-11-28T00:27:56,836 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=64, ppid=63, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, ASSIGN in 358 msec 2024-11-28T00:27:56,837 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:27:56,837 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753676837"}]},"ts":"1732753676837"} 2024-11-28T00:27:56,840 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-28T00:27:56,840 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=63, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:27:56,841 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithResetTtl jenkins: RWXCA 2024-11-28T00:27:56,845 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:27:56,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:56,863 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:56,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:56,863 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:56,887 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:56,886 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:56,887 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:56,888 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:56,889 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=63, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithResetTtl in 456 msec 2024-11-28T00:27:57,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=63 2024-11-28T00:27:57,062 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:27:57,062 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:57,066 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-28T00:27:57,066 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 
2024-11-28T00:27:57,066 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:27:57,068 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:57,075 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:57,082 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:57,086 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-28T00:27:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753677086 (current time:1732753677086). 2024-11-28T00:27:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:27:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-28T00:27:57,086 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:27:57,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@701869d4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:57,088 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:57,089 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:57,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:57,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:57,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36eb96aa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-28T00:27:57,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:57,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:57,090 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,091 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38436, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:57,091 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7989ee37, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,092 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:57,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:57,093 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:57,094 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41850, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:57,096 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:27:57,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:57,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,096 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,096 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:57,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e54f10a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:57,097 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:57,098 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:57,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:57,098 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:57,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@c01b514, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:57,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:57,099 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,100 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38448, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:57,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6947a672, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:57,102 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:57,103 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:57,104 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41864, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:57,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:27:57,106 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:57,107 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60470, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:57,108 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:27:57,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:57,108 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,109 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:57,109 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:27:57,110 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:27:57,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-28T00:27:57,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-28T00:27:57,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-28T00:27:57,112 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:27:57,114 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:27:57,116 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:27:57,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741971_1147 (size=161) 2024-11-28T00:27:57,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741971_1147 (size=161) 2024-11-28T00:27:57,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741971_1147 (size=161) 2024-11-28T00:27:57,129 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:27:57,130 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7}, {pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4670378fa01e0ebd58277d5c264c6def}] 2024-11-28T00:27:57,131 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:57,131 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:57,221 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-28T00:27:57,283 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=69 2024-11-28T00:27:57,284 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=70 2024-11-28T00:27:57,284 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.HRegion(2603): Flush status journal for 4670378fa01e0ebd58277d5c264c6def: 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.HRegion(2603): Flush status journal for 2bfdc119724c45a502c5d7a8528a53e7: 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. for emptySnaptb0-testExportWithResetTtl completed. 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.' region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.' 
region-info for snapshot=emptySnaptb0-testExportWithResetTtl 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:57,285 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:27:57,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741973_1149 (size=68) 2024-11-28T00:27:57,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741973_1149 (size=68) 2024-11-28T00:27:57,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741973_1149 (size=68) 2024-11-28T00:27:57,324 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:57,325 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=69}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=69 2024-11-28T00:27:57,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=69 2024-11-28T00:27:57,325 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:57,326 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=69, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:57,328 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=69, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7 in 197 msec 2024-11-28T00:27:57,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741972_1148 (size=68) 2024-11-28T00:27:57,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741972_1148 (size=68) 2024-11-28T00:27:57,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741972_1148 (size=68) 2024-11-28T00:27:57,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 
2024-11-28T00:27:57,339 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=70}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=70 2024-11-28T00:27:57,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=70 2024-11-28T00:27:57,340 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithResetTtl on region 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:57,340 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=70, ppid=68, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:57,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=70, resume processing ppid=68 2024-11-28T00:27:57,343 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=70, ppid=68, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4670378fa01e0ebd58277d5c264c6def in 211 msec 2024-11-28T00:27:57,343 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:27:57,344 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:27:57,345 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:27:57,345 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:27:57,345 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:57,346 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:27:57,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741974_1150 (size=60) 2024-11-28T00:27:57,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741974_1150 (size=60) 2024-11-28T00:27:57,358 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741974_1150 (size=60) 2024-11-28T00:27:57,362 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:27:57,362 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithResetTtl 2024-11-28T00:27:57,362 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl 2024-11-28T00:27:57,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741975_1151 (size=641) 2024-11-28T00:27:57,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741975_1151 (size=641) 2024-11-28T00:27:57,386 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741975_1151 (size=641) 2024-11-28T00:27:57,394 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:27:57,402 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:27:57,402 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithResetTtl to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testExportWithResetTtl 2024-11-28T00:27:57,404 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=68, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; 
org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:27:57,404 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 68 2024-11-28T00:27:57,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=68, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=68, snapshot={ ss=emptySnaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 295 msec 2024-11-28T00:27:57,431 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=68 2024-11-28T00:27:57,431 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:27:57,439 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:27:57,442 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41621 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:27:57,444 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:57,447 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithResetTtl 2024-11-28T00:27:57,447 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 
2024-11-28T00:27:57,448 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:27:57,449 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:57,455 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:57,461 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithResetTtl,, stopping at row=testtb-testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:57,465 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-28T00:27:57,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753677465 (current time:1732753677465). 2024-11-28T00:27:57,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:27:57,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-28T00:27:57,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:27:57,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@187fa2ac, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:57,466 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:57,467 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:57,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:57,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:57,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45e39da2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-28T00:27:57,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:57,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:57,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,468 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38472, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:57,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2db73167, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:57,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:57,470 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:57,472 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41878, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:57,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:27:57,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:57,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,474 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:57,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a384dcb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:57,475 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:57,476 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:57,476 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:57,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:57,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39e6648b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:57,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:57,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,478 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38498, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:57,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65026e7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:57,479 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:57,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:57,480 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:57,482 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41888, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:57,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:27:57,484 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:57,485 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60480, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:57,486 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:27:57,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:57,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:57,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:27:57,487 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:57,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:27:57,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } 2024-11-28T00:27:57,489 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-28T00:27:57,490 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-28T00:27:57,491 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:27:57,492 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:27:57,496 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:27:57,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741976_1152 (size=156) 2024-11-28T00:27:57,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741976_1152 (size=156) 2024-11-28T00:27:57,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741976_1152 (size=156) 2024-11-28T00:27:57,509 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:27:57,509 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7}, {pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4670378fa01e0ebd58277d5c264c6def}] 2024-11-28T00:27:57,511 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:57,512 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:57,601 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-28T00:27:57,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=73 2024-11-28T00:27:57,665 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=72 2024-11-28T00:27:57,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:27:57,665 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:27:57,665 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2902): Flushing 2bfdc119724c45a502c5d7a8528a53e7 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-28T00:27:57,665 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2902): Flushing 4670378fa01e0ebd58277d5c264c6def 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-28T00:27:57,704 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411280463646844aa41ce8ad9d050211b78d9_4670378fa01e0ebd58277d5c264c6def is 71, key is 104c74645b40082d7b77ca92004e868a/cf:q/1732753677442/Put/seqid=0 2024-11-28T00:27:57,712 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286e1a449f9b0e444cba7230cf36027377_2bfdc119724c45a502c5d7a8528a53e7 is 71, key is 0c7d3d23d63ed81f22556463443dba0a/cf:q/1732753677439/Put/seqid=0 2024-11-28T00:27:57,716 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741977_1153 (size=8241) 2024-11-28T00:27:57,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741977_1153 (size=8241) 2024-11-28T00:27:57,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741977_1153 (size=8241) 2024-11-28T00:27:57,720 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:57,726 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411280463646844aa41ce8ad9d050211b78d9_4670378fa01e0ebd58277d5c264c6def to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411280463646844aa41ce8ad9d050211b78d9_4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:57,727 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/.tmp/cf/0b6c174558fb40189434465c9caad6b9, store: [table=testtb-testExportWithResetTtl family=cf region=4670378fa01e0ebd58277d5c264c6def] 2024-11-28T00:27:57,728 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/.tmp/cf/0b6c174558fb40189434465c9caad6b9 is 206, key is 1eff847d1168a450a5b1cb02be667e227/cf:q/1732753677442/Put/seqid=0 2024-11-28T00:27:57,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741978_1154 (size=5032) 2024-11-28T00:27:57,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741978_1154 (size=5032) 2024-11-28T00:27:57,735 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741978_1154 (size=5032) 2024-11-28T00:27:57,735 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:57,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741979_1155 (size=15055) 2024-11-28T00:27:57,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741979_1155 (size=15055) 2024-11-28T00:27:57,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741979_1155 (size=15055) 2024-11-28T00:27:57,741 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/.tmp/cf/0b6c174558fb40189434465c9caad6b9 2024-11-28T00:27:57,743 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411286e1a449f9b0e444cba7230cf36027377_2bfdc119724c45a502c5d7a8528a53e7 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411286e1a449f9b0e444cba7230cf36027377_2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:57,753 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/.tmp/cf/1f3a4fb716e84263a959d0f05805b922, store: [table=testtb-testExportWithResetTtl family=cf region=2bfdc119724c45a502c5d7a8528a53e7] 2024-11-28T00:27:57,755 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/.tmp/cf/1f3a4fb716e84263a959d0f05805b922 is 206, key is 005d336e4b90f6bc39a7c29b6b3a9b196/cf:q/1732753677439/Put/seqid=0 2024-11-28T00:27:57,762 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/.tmp/cf/0b6c174558fb40189434465c9caad6b9 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/cf/0b6c174558fb40189434465c9caad6b9 2024-11-28T00:27:57,770 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/cf/0b6c174558fb40189434465c9caad6b9, entries=48, sequenceid=6, filesize=14.7 K 2024-11-28T00:27:57,772 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 4670378fa01e0ebd58277d5c264c6def in 106ms, sequenceid=6, compaction requested=false 2024-11-28T00:27:57,772 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithResetTtl' 2024-11-28T00:27:57,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.HRegion(2603): Flush status journal for 4670378fa01e0ebd58277d5c264c6def: 2024-11-28T00:27:57,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. for snaptb0-testExportWithResetTtl completed. 
2024-11-28T00:27:57,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-28T00:27:57,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:57,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/cf/0b6c174558fb40189434465c9caad6b9] hfiles 2024-11-28T00:27:57,773 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/cf/0b6c174558fb40189434465c9caad6b9 for snapshot=snaptb0-testExportWithResetTtl 2024-11-28T00:27:57,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-28T00:27:57,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741980_1156 (size=5700) 2024-11-28T00:27:57,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741980_1156 (size=5700) 2024-11-28T00:27:57,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741980_1156 (size=5700) 2024-11-28T00:27:57,838 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/.tmp/cf/1f3a4fb716e84263a959d0f05805b922 2024-11-28T00:27:57,847 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/.tmp/cf/1f3a4fb716e84263a959d0f05805b922 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/cf/1f3a4fb716e84263a959d0f05805b922 2024-11-28T00:27:57,855 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/cf/1f3a4fb716e84263a959d0f05805b922, entries=2, sequenceid=6, filesize=5.6 K 2024-11-28T00:27:57,856 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, 
pid=72}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 2bfdc119724c45a502c5d7a8528a53e7 in 191ms, sequenceid=6, compaction requested=false 2024-11-28T00:27:57,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.HRegion(2603): Flush status journal for 2bfdc119724c45a502c5d7a8528a53e7: 2024-11-28T00:27:57,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. for snaptb0-testExportWithResetTtl completed. 2024-11-28T00:27:57,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.' region-info for snapshot=snaptb0-testExportWithResetTtl 2024-11-28T00:27:57,856 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:57,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/cf/1f3a4fb716e84263a959d0f05805b922] hfiles 2024-11-28T00:27:57,857 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/cf/1f3a4fb716e84263a959d0f05805b922 for snapshot=snaptb0-testExportWithResetTtl 2024-11-28T00:27:57,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741981_1157 (size=107) 2024-11-28T00:27:57,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741981_1157 (size=107) 2024-11-28T00:27:57,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 
2024-11-28T00:27:57,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741981_1157 (size=107) 2024-11-28T00:27:57,860 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=73}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=73 2024-11-28T00:27:57,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=73 2024-11-28T00:27:57,861 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:57,861 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=73, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:57,870 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=73, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 4670378fa01e0ebd58277d5c264c6def in 354 msec 2024-11-28T00:27:57,877 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741982_1158 (size=107) 2024-11-28T00:27:57,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741982_1158 (size=107) 2024-11-28T00:27:57,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741982_1158 (size=107) 2024-11-28T00:27:57,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 
2024-11-28T00:27:57,879 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=72}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=72 2024-11-28T00:27:57,880 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=72 2024-11-28T00:27:57,880 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithResetTtl on region 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:57,880 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=72, ppid=71, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:57,883 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=72, resume processing ppid=71 2024-11-28T00:27:57,884 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=72, ppid=71, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7 in 372 msec 2024-11-28T00:27:57,884 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:27:57,885 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:27:57,886 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:27:57,886 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:27:57,886 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:57,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411280463646844aa41ce8ad9d050211b78d9_4670378fa01e0ebd58277d5c264c6def, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411286e1a449f9b0e444cba7230cf36027377_2bfdc119724c45a502c5d7a8528a53e7] hfiles 2024-11-28T00:27:57,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411280463646844aa41ce8ad9d050211b78d9_4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:27:57,890 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411286e1a449f9b0e444cba7230cf36027377_2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:27:57,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741983_1159 (size=291) 2024-11-28T00:27:57,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741983_1159 (size=291) 2024-11-28T00:27:57,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741983_1159 (size=291) 2024-11-28T00:27:57,921 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:27:57,921 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithResetTtl 2024-11-28T00:27:57,922 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl 2024-11-28T00:27:57,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741984_1160 (size=951) 2024-11-28T00:27:57,962 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741984_1160 (size=951) 2024-11-28T00:27:57,964 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741984_1160 (size=951) 2024-11-28T00:27:57,971 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:27:57,982 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:27:57,982 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportWithResetTtl to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithResetTtl 2024-11-28T00:27:57,984 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=71, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:27:57,984 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 }, snapshot procedure id = 71 2024-11-28T00:27:57,986 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=71, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=71, snapshot={ ss=snaptb0-testExportWithResetTtl table=testtb-testExportWithResetTtl type=FLUSH ttl=0 } in 496 msec 2024-11-28T00:27:58,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=71 2024-11-28T00:27:58,122 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:27:58,124 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:27:58,126 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportWithResetTtl 2024-11-28T00:27:58,127 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl 
execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:27:58,127 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportWithResetTtl" procId is: 74 2024-11-28T00:27:58,129 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-28T00:27:58,129 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:27:58,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741985_1161 (size=433) 2024-11-28T00:27:58,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741985_1161 (size=433) 2024-11-28T00:27:58,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741985_1161 (size=433) 2024-11-28T00:27:58,165 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 116f5f29dcbbc80eeb96e627af862a3a, NAME => 'testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:27:58,175 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => b4d3571a18c0c71be2bd253059ab6da4, NAME => 'testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportWithResetTtl', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:27:58,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741986_1162 (size=58) 2024-11-28T00:27:58,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741986_1162 (size=58) 2024-11-28T00:27:58,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741986_1162 (size=58) 2024-11-28T00:27:58,206 DEBUG 
[RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:58,206 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1722): Closing 116f5f29dcbbc80eeb96e627af862a3a, disabling compactions & flushes 2024-11-28T00:27:58,206 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:27:58,207 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:27:58,207 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. after waiting 0 ms 2024-11-28T00:27:58,207 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:27:58,207 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:27:58,207 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-0 {}] regionserver.HRegion(1676): Region close journal for 116f5f29dcbbc80eeb96e627af862a3a: Waiting for close lock at 1732753678206Disabling compacts and flushes for region at 1732753678206Disabling writes for close at 1732753678207 (+1 ms)Writing region close event to WAL at 1732753678207Closed at 1732753678207 2024-11-28T00:27:58,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-28T00:27:58,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741987_1163 (size=58) 2024-11-28T00:27:58,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741987_1163 (size=58) 2024-11-28T00:27:58,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741987_1163 (size=58) 2024-11-28T00:27:58,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:58,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1722): Closing b4d3571a18c0c71be2bd253059ab6da4, disabling compactions & flushes 2024-11-28T00:27:58,248 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:27:58,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 
2024-11-28T00:27:58,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. after waiting 0 ms 2024-11-28T00:27:58,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:27:58,248 INFO [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:27:58,248 DEBUG [RegionOpenAndInit-testExportWithResetTtl-pool-1 {}] regionserver.HRegion(1676): Region close journal for b4d3571a18c0c71be2bd253059ab6da4: Waiting for close lock at 1732753678248Disabling compacts and flushes for region at 1732753678248Disabling writes for close at 1732753678248Writing region close event to WAL at 1732753678248Closed at 1732753678248 2024-11-28T00:27:58,250 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:27:58,250 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732753678250"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753678250"}]},"ts":"1732753678250"} 2024-11-28T00:27:58,250 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4.","families":{"info":[{"qualifier":"regioninfo","vlen":57,"tag":[],"timestamp":"1732753678250"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753678250"}]},"ts":"1732753678250"} 2024-11-28T00:27:58,253 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-28T00:27:58,254 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:27:58,255 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753678254"}]},"ts":"1732753678254"} 2024-11-28T00:27:58,257 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLING in hbase:meta 2024-11-28T00:27:58,257 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:27:58,260 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:27:58,260 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:27:58,260 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:27:58,260 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:27:58,260 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:27:58,260 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:27:58,260 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:27:58,260 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:27:58,260 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:27:58,260 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:27:58,260 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, ASSIGN}, {pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, ASSIGN}] 2024-11-28T00:27:58,262 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, ASSIGN 2024-11-28T00:27:58,263 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, ASSIGN 2024-11-28T00:27:58,264 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:27:58,265 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; 
TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:27:58,415 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-28T00:27:58,415 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=b4d3571a18c0c71be2bd253059ab6da4, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:27:58,415 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=116f5f29dcbbc80eeb96e627af862a3a, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:27:58,419 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=76, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, ASSIGN because future has completed 2024-11-28T00:27:58,419 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure b4d3571a18c0c71be2bd253059ab6da4, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:27:58,422 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=75, ppid=74, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, ASSIGN because future has completed 2024-11-28T00:27:58,422 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:27:58,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-28T00:27:58,577 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:27:58,577 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7752): Opening region: {ENCODED => b4d3571a18c0c71be2bd253059ab6da4, NAME => 'testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:27:58,578 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. service=AccessControlService 2024-11-28T00:27:58,578 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:27:58,578 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,579 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:58,579 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7794): checking encryption for b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,579 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(7797): checking classloading for b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,583 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(132): Open testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:27:58,583 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7752): Opening region: {ENCODED => 116f5f29dcbbc80eeb96e627af862a3a, NAME => 'testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:27:58,583 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. service=AccessControlService 2024-11-28T00:27:58,583 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:27:58,584 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportWithResetTtl 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,584 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(898): Instantiated testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:27:58,584 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7794): checking encryption for 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,584 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(7797): checking classloading for 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,588 INFO [StoreOpener-116f5f29dcbbc80eeb96e627af862a3a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,591 INFO [StoreOpener-116f5f29dcbbc80eeb96e627af862a3a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 116f5f29dcbbc80eeb96e627af862a3a columnFamilyName cf 2024-11-28T00:27:58,596 INFO [StoreOpener-b4d3571a18c0c71be2bd253059ab6da4-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,598 INFO [StoreOpener-b4d3571a18c0c71be2bd253059ab6da4-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region b4d3571a18c0c71be2bd253059ab6da4 columnFamilyName cf 2024-11-28T00:27:58,599 DEBUG [StoreOpener-116f5f29dcbbc80eeb96e627af862a3a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:58,599 DEBUG [StoreOpener-b4d3571a18c0c71be2bd253059ab6da4-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:58,600 INFO [StoreOpener-b4d3571a18c0c71be2bd253059ab6da4-1 {}] regionserver.HStore(327): Store=b4d3571a18c0c71be2bd253059ab6da4/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:27:58,600 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1038): replaying wal for b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,601 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,602 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,602 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1048): stopping wal replay for b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,602 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1060): Cleaning up temporary data for b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,603 INFO [StoreOpener-116f5f29dcbbc80eeb96e627af862a3a-1 {}] regionserver.HStore(327): Store=116f5f29dcbbc80eeb96e627af862a3a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:27:58,603 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1038): replaying wal for 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,604 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,606 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,607 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1093): writing seq id for b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,607 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1048): stopping wal replay for 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,607 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] 
regionserver.HRegion(1060): Cleaning up temporary data for 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,610 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1093): writing seq id for 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,612 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:27:58,613 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1114): Opened b4d3571a18c0c71be2bd253059ab6da4; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=64541805, jitterRate=-0.03825215995311737}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:27:58,613 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1122): Running coprocessor post-open hooks for b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:58,614 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegion(1006): Region open journal for b4d3571a18c0c71be2bd253059ab6da4: Running coprocessor pre-open hook at 1732753678579Writing region info on filesystem at 1732753678579Initializing all the Stores at 1732753678580 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753678580Cleaning up temporary data from old regions at 1732753678602 (+22 ms)Running coprocessor post-open hooks at 1732753678613 (+11 ms)Region opened successfully at 1732753678614 (+1 ms) 2024-11-28T00:27:58,615 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4., pid=77, masterSystemTime=1732753678573 2024-11-28T00:27:58,618 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:27:58,618 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=77}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 
2024-11-28T00:27:58,618 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=76 updating hbase:meta row=b4d3571a18c0c71be2bd253059ab6da4, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:27:58,621 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=77, ppid=76, state=RUNNABLE, hasLock=false; OpenRegionProcedure b4d3571a18c0c71be2bd253059ab6da4, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:27:58,622 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:27:58,623 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1114): Opened 116f5f29dcbbc80eeb96e627af862a3a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59034197, jitterRate=-0.12032191455364227}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:27:58,623 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:58,623 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegion(1006): Region open journal for 116f5f29dcbbc80eeb96e627af862a3a: Running coprocessor pre-open hook at 1732753678584Writing region info on filesystem at 1732753678584Initializing all the Stores at 1732753678586 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753678586Cleaning up temporary data from old regions at 1732753678607 (+21 ms)Running coprocessor post-open hooks at 1732753678623 (+16 ms)Region opened successfully at 1732753678623 2024-11-28T00:27:58,629 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a., pid=78, masterSystemTime=1732753678576 2024-11-28T00:27:58,631 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:27:58,631 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=78}] handler.AssignRegionHandler(153): Opened testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 
2024-11-28T00:27:58,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=77, resume processing ppid=76 2024-11-28T00:27:58,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=77, ppid=76, state=SUCCESS, hasLock=false; OpenRegionProcedure b4d3571a18c0c71be2bd253059ab6da4, server=72c8c7f04009,43335,1732753506004 in 210 msec 2024-11-28T00:27:58,632 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=75 updating hbase:meta row=116f5f29dcbbc80eeb96e627af862a3a, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:27:58,634 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=76, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, ASSIGN in 372 msec 2024-11-28T00:27:58,634 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=78, ppid=75, state=RUNNABLE, hasLock=false; OpenRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:27:58,637 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=78, resume processing ppid=75 2024-11-28T00:27:58,637 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=78, ppid=75, state=SUCCESS, hasLock=false; OpenRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a, server=72c8c7f04009,36427,1732753506332 in 213 msec 2024-11-28T00:27:58,639 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=75, resume processing ppid=74 2024-11-28T00:27:58,640 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=75, ppid=74, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, ASSIGN in 377 msec 2024-11-28T00:27:58,641 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:27:58,641 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753678641"}]},"ts":"1732753678641"} 2024-11-28T00:27:58,643 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=ENABLED in hbase:meta 2024-11-28T00:27:58,644 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=74, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportWithResetTtl execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:27:58,645 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testExportWithResetTtl jenkins: RWXCA 2024-11-28T00:27:58,649 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:27:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, 
quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:58,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:27:58,661 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:58,661 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:58,661 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:58,661 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:58,661 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:58,661 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:58,662 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:58,662 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF\x0A<\x0A\x07jenkins\x121\x08\x03"-\x0A!\x0A\x07default\x12\x16testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:27:58,677 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=74, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportWithResetTtl in 536 msec 2024-11-28T00:27:58,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=74 2024-11-28T00:27:58,761 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportWithResetTtl completed 2024-11-28T00:27:58,761 DEBUG [Time-limited test {}] 
hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:58,766 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-28T00:27:58,766 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:27:58,766 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:27:58,768 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:58,777 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:58,809 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:58,837 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:27:58,840 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:27:58,842 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:58,845 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportWithResetTtl 2024-11-28T00:27:58,845 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 
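The HRegion(8528) warnings just above show the test populating both regions with the WAL disabled, so the edits live only in the memstore until the pre-snapshot flush. A minimal sketch of issuing such a write with the client API; the row key and value are illustrative (the qualifier 'q' matches the cells seen later in the flush logs):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class SkipWalPut {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("testExportWithResetTtl"))) {
          Put put = new Put(Bytes.toBytes("row-0"));  // illustrative row key
          put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
          // SKIP_WAL is what produces the "writing data ... with WAL disabled" warning:
          // the edit goes only to the memstore and is lost if the server crashes
          // before the next flush.
          put.setDurability(Durability.SKIP_WAL);
          table.put(put);
        }
      }
    }

SKIP_WAL trades durability for write throughput, which is why the server logs "Data may be lost in the event of a crash."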
2024-11-28T00:27:58,846 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:27:58,848 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:58,860 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:58,868 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportWithResetTtl,, stopping at row=testExportWithResetTtl ,, for max=2147483647 with caching=100 2024-11-28T00:27:58,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-28T00:27:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753678872 (current time:1732753678872). 2024-11-28T00:27:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb-testExportWithResetTtl VERSION not specified, setting to 2 2024-11-28T00:27:58,872 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:27:58,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1215aa50, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:58,894 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:58,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:58,896 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:58,896 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:58,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:58,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44d8007c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:58,897 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:58,897 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:58,898 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:58,899 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38522, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:58,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@42989473, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:58,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:58,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:58,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:58,903 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41894, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:58,905 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:27:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:58,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:58,906 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:58,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ac3067a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:58,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:27:58,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:27:58,919 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:27:58,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:27:58,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:27:58,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3197ec95, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:58,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:27:58,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:27:58,921 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:58,922 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38532, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:27:58,923 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cea455f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:27:58,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:27:58,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:27:58,928 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:58,930 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41910, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:58,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportWithResetTtl', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:27:58,932 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:27:58,934 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60490, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:27:58,935 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
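The DEBUG entries above trace the short-lived connections the master opens while validating the snapshot request: fetch the cluster id from the connection registry, resolve the hbase:meta location, then locate the hbase:acl region holding the table permissions. Any client connection performs the same bootstrap; a minimal sketch of the equivalent client-side calls, with an illustrative class name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class LocateAclRegion {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Opening the connection triggers the same steps the DEBUG entries show:
        // fetch the cluster id from the registry, then resolve hbase:meta before
        // any user-table or hbase:acl region can be located.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.valueOf("hbase:acl"))) {
          HRegionLocation loc =
              locator.getRegionLocation(Bytes.toBytes("testExportWithResetTtl"));
          System.out.println("hbase:acl row is served by " + loc.getServerName());
        }
      }
    }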
2024-11-28T00:27:58,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:27:58,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:58,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:27:58,935 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:27:58,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testExportWithResetTtl], kv [jenkins: RWXCA] 2024-11-28T00:27:58,936 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
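The request logged at 00:27:58,871 asks for a FLUSH-type snapshot named snaptb-testExportWithResetTtl with ttl=100000. A minimal sketch of issuing the same request through the Admin API; the TTL is supplied via snapshot properties in the test harness and is deliberately omitted here rather than guessed at:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotDescription;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // A FLUSH-type snapshot like the one requested above; the call blocks
          // until the master-side SnapshotProcedure (pid=79 in this log) completes.
          admin.snapshot(new SnapshotDescription(
              "snaptb-testExportWithResetTtl",
              TableName.valueOf("testExportWithResetTtl"),
              SnapshotType.FLUSH));
        }
      }
    }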
2024-11-28T00:27:58,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } 2024-11-28T00:27:58,938 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-28T00:27:58,939 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:27:58,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-28T00:27:58,942 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:27:58,946 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:27:58,999 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741988_1164 (size=143) 2024-11-28T00:27:59,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741988_1164 (size=143) 2024-11-28T00:27:59,000 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741988_1164 (size=143) 2024-11-28T00:27:59,003 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:27:59,003 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a}, {pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b4d3571a18c0c71be2bd253059ab6da4}] 2024-11-28T00:27:59,005 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:59,005 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:59,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): 
Checking to see if procedure is done pid=79 2024-11-28T00:27:59,157 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=81 2024-11-28T00:27:59,158 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:27:59,158 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=80 2024-11-28T00:27:59,159 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:27:59,159 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2902): Flushing b4d3571a18c0c71be2bd253059ab6da4 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:27:59,159 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2902): Flushing 116f5f29dcbbc80eeb96e627af862a3a 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:27:59,225 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c77b4726c27241b88911e87c7fca9593_116f5f29dcbbc80eeb96e627af862a3a is 71, key is 073b2c19b9fb6952a75a068179dc0e08/cf:q/1732753678837/Put/seqid=0 2024-11-28T00:27:59,254 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128c95b2d739f4a4a25bb8689d8554cfcbb_b4d3571a18c0c71be2bd253059ab6da4 is 71, key is 10953a7f7eb3c1d00189c0bf8b2cab42/cf:q/1732753678839/Put/seqid=0 2024-11-28T00:27:59,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-28T00:27:59,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741989_1165 (size=5102) 2024-11-28T00:27:59,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741989_1165 (size=5102) 2024-11-28T00:27:59,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741989_1165 (size=5102) 2024-11-28T00:27:59,272 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:59,279 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 
{event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128c77b4726c27241b88911e87c7fca9593_116f5f29dcbbc80eeb96e627af862a3a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128c77b4726c27241b88911e87c7fca9593_116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:59,281 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/.tmp/cf/6d2ea29143db4f1db1af8b217a6fc479, store: [table=testExportWithResetTtl family=cf region=116f5f29dcbbc80eeb96e627af862a3a] 2024-11-28T00:27:59,282 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/.tmp/cf/6d2ea29143db4f1db1af8b217a6fc479 is 199, key is 0453f7877a9013eadc52c4eb10b65df37/cf:q/1732753678837/Put/seqid=0 2024-11-28T00:27:59,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741990_1166 (size=8171) 2024-11-28T00:27:59,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741990_1166 (size=8171) 2024-11-28T00:27:59,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741990_1166 (size=8171) 2024-11-28T00:27:59,296 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:59,305 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128c95b2d739f4a4a25bb8689d8554cfcbb_b4d3571a18c0c71be2bd253059ab6da4 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128c95b2d739f4a4a25bb8689d8554cfcbb_b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:59,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/.tmp/cf/1becbf6f5b6b40b9aa940e2ea5842b7f, store: [table=testExportWithResetTtl family=cf region=b4d3571a18c0c71be2bd253059ab6da4] 2024-11-28T00:27:59,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/.tmp/cf/1becbf6f5b6b40b9aa940e2ea5842b7f is 199, key is 1b991a49ec42b9750e9abf54a0a7452c6/cf:q/1732753678839/Put/seqid=0 2024-11-28T00:27:59,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741991_1167 (size=5878) 2024-11-28T00:27:59,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741991_1167 (size=5878) 2024-11-28T00:27:59,333 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741991_1167 (size=5878) 2024-11-28T00:27:59,334 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/.tmp/cf/6d2ea29143db4f1db1af8b217a6fc479 2024-11-28T00:27:59,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741992_1168 (size=14517) 2024-11-28T00:27:59,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741992_1168 (size=14517) 2024-11-28T00:27:59,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741992_1168 (size=14517) 2024-11-28T00:27:59,341 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/.tmp/cf/1becbf6f5b6b40b9aa940e2ea5842b7f 2024-11-28T00:27:59,346 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/.tmp/cf/6d2ea29143db4f1db1af8b217a6fc479 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/cf/6d2ea29143db4f1db1af8b217a6fc479 2024-11-28T00:27:59,364 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/cf/6d2ea29143db4f1db1af8b217a6fc479, entries=3, sequenceid=5, filesize=5.7 K 2024-11-28T00:27:59,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/.tmp/cf/1becbf6f5b6b40b9aa940e2ea5842b7f as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/cf/1becbf6f5b6b40b9aa940e2ea5842b7f 2024-11-28T00:27:59,365 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 116f5f29dcbbc80eeb96e627af862a3a in 206ms, sequenceid=5, compaction requested=false 2024-11-28T00:27:59,365 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportWithResetTtl' 2024-11-28T00:27:59,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.HRegion(2603): Flush status journal for 116f5f29dcbbc80eeb96e627af862a3a: 2024-11-28T00:27:59,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. for snaptb-testExportWithResetTtl completed. 2024-11-28T00:27:59,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-28T00:27:59,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:59,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/cf/6d2ea29143db4f1db1af8b217a6fc479] hfiles 2024-11-28T00:27:59,368 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/cf/6d2ea29143db4f1db1af8b217a6fc479 for snapshot=snaptb-testExportWithResetTtl 2024-11-28T00:27:59,381 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/cf/1becbf6f5b6b40b9aa940e2ea5842b7f, entries=47, sequenceid=5, filesize=14.2 K 2024-11-28T00:27:59,383 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for b4d3571a18c0c71be2bd253059ab6da4 in 225ms, sequenceid=5, compaction requested=false 2024-11-28T00:27:59,383 DEBUG 
[RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.HRegion(2603): Flush status journal for b4d3571a18c0c71be2bd253059ab6da4: 2024-11-28T00:27:59,383 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. for snaptb-testExportWithResetTtl completed. 2024-11-28T00:27:59,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(241): Storing 'testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4.' region-info for snapshot=snaptb-testExportWithResetTtl 2024-11-28T00:27:59,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:27:59,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/cf/1becbf6f5b6b40b9aa940e2ea5842b7f] hfiles 2024-11-28T00:27:59,384 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/cf/1becbf6f5b6b40b9aa940e2ea5842b7f for snapshot=snaptb-testExportWithResetTtl 2024-11-28T00:27:59,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741993_1169 (size=100) 2024-11-28T00:27:59,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741993_1169 (size=100) 2024-11-28T00:27:59,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741993_1169 (size=100) 2024-11-28T00:27:59,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 
2024-11-28T00:27:59,422 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=80}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=80 2024-11-28T00:27:59,423 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=80 2024-11-28T00:27:59,423 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:59,423 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=80, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:59,426 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=80, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a in 421 msec 2024-11-28T00:27:59,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741994_1170 (size=100) 2024-11-28T00:27:59,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741994_1170 (size=100) 2024-11-28T00:27:59,440 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741994_1170 (size=100) 2024-11-28T00:27:59,440 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 
2024-11-28T00:27:59,440 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=81}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=81 2024-11-28T00:27:59,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=81 2024-11-28T00:27:59,441 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb-testExportWithResetTtl on region b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:59,441 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=81, ppid=79, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:59,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=81, resume processing ppid=79 2024-11-28T00:27:59,446 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=81, ppid=79, state=SUCCESS, hasLock=false; SnapshotRegionProcedure b4d3571a18c0c71be2bd253059ab6da4 in 439 msec 2024-11-28T00:27:59,446 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:27:59,447 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:27:59,452 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
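A few entries further down (from 00:28:00) the completed snapshot is exported to a separate HDFS directory with the ExportSnapshot tool, which is what drives the subsequent TableMapReduceUtil jar resolution and block-report traffic. A minimal sketch of invoking the same tool programmatically; the destination URI is copied from the log, while the driver class name and mapper count are illustrative assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotDriver {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Equivalent to: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //   -snapshot snaptb-testExportWithResetTtl -copy-to <dest> -mappers 2
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb-testExportWithResetTtl",
            "-copy-to", "hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094",
            "-mappers", "2"
        });
        System.exit(rc);
      }
    }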
2024-11-28T00:27:59,452 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:27:59,452 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:27:59,454 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128c95b2d739f4a4a25bb8689d8554cfcbb_b4d3571a18c0c71be2bd253059ab6da4, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128c77b4726c27241b88911e87c7fca9593_116f5f29dcbbc80eeb96e627af862a3a] hfiles 2024-11-28T00:27:59,454 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128c95b2d739f4a4a25bb8689d8554cfcbb_b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:27:59,454 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128c77b4726c27241b88911e87c7fca9593_116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:27:59,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741995_1171 (size=284) 2024-11-28T00:27:59,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741995_1171 (size=284) 2024-11-28T00:27:59,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741995_1171 (size=284) 2024-11-28T00:27:59,515 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:27:59,515 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb-testExportWithResetTtl 2024-11-28T00:27:59,516 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-28T00:27:59,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741996_1172 (size=923) 2024-11-28T00:27:59,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741996_1172 (size=923) 2024-11-28T00:27:59,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43159 is added to blk_1073741996_1172 (size=923) 2024-11-28T00:27:59,572 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-28T00:27:59,573 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:27:59,611 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:27:59,612 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-28T00:27:59,614 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=79, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:27:59,614 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 }, snapshot procedure id = 79 2024-11-28T00:27:59,616 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=79, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=79, snapshot={ ss=snaptb-testExportWithResetTtl table=testExportWithResetTtl type=FLUSH ttl=100000 } in 677 msec 2024-11-28T00:28:00,081 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=79 2024-11-28T00:28:00,082 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportWithResetTtl completed 2024-11-28T00:28:00,095 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094 2024-11-28T00:28:00,095 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:00,148 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:00,149 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-28T00:28:00,165 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:28:00,176 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb-testExportWithResetTtl to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094/.hbase-snapshot/.tmp/snaptb-testExportWithResetTtl 2024-11-28T00:28:00,237 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741997_1173 (size=923) 2024-11-28T00:28:00,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741997_1173 (size=923) 2024-11-28T00:28:00,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741997_1173 (size=923) 2024-11-28T00:28:00,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741998_1174 (size=143) 2024-11-28T00:28:00,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741998_1174 (size=143) 2024-11-28T00:28:00,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741998_1174 (size=143) 2024-11-28T00:28:00,299 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741999_1175 (size=141) 2024-11-28T00:28:00,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741999_1175 (size=141) 2024-11-28T00:28:00,301 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741999_1175 (size=141) 2024-11-28T00:28:00,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:00,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:00,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:00,526 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1732753513471_0002_000001 (auth:SIMPLE) from 127.0.0.1:36662 2024-11-28T00:28:01,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-14598445724166966369.jar 2024-11-28T00:28:01,630 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:01,631 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:01,653 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:28:01,722 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-1285823680435561620.jar 2024-11-28T00:28:01,723 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:01,723 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:01,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:01,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:01,724 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:01,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:01,725 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:28:01,726 DEBUG [Time-limited test {}] 
mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:28:01,726 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:28:01,726 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:28:01,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:28:01,727 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:28:01,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:28:01,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:28:01,728 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:28:01,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:28:01,729 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:28:01,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:01,730 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:01,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:01,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:01,731 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:01,732 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:01,732 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:01,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742000_1176 (size=131440) 2024-11-28T00:28:01,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742000_1176 (size=131440) 2024-11-28T00:28:01,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742000_1176 (size=131440) 2024-11-28T00:28:01,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742001_1177 (size=4188619) 2024-11-28T00:28:01,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742001_1177 (size=4188619) 2024-11-28T00:28:01,892 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742001_1177 (size=4188619) 2024-11-28T00:28:01,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742002_1178 (size=1323991) 2024-11-28T00:28:01,931 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742002_1178 (size=1323991) 2024-11-28T00:28:01,932 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742002_1178 (size=1323991) 2024-11-28T00:28:01,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742003_1179 (size=903927) 2024-11-28T00:28:01,959 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742003_1179 (size=903927) 2024-11-28T00:28:01,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742003_1179 (size=903927) 2024-11-28T00:28:02,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742004_1180 (size=8360083) 2024-11-28T00:28:02,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742004_1180 (size=8360083) 2024-11-28T00:28:02,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742004_1180 (size=8360083) 2024-11-28T00:28:02,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742005_1181 (size=1877034) 2024-11-28T00:28:02,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742005_1181 (size=1877034) 2024-11-28T00:28:02,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742005_1181 (size=1877034) 2024-11-28T00:28:02,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742006_1182 (size=77835) 2024-11-28T00:28:02,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742006_1182 (size=77835) 2024-11-28T00:28:02,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742006_1182 (size=77835) 2024-11-28T00:28:02,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742007_1183 (size=30949) 2024-11-28T00:28:02,118 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742007_1183 (size=30949) 2024-11-28T00:28:02,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742007_1183 (size=30949) 2024-11-28T00:28:02,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742008_1184 (size=1597213) 2024-11-28T00:28:02,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742008_1184 (size=1597213) 2024-11-28T00:28:02,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742008_1184 (size=1597213) 2024-11-28T00:28:02,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742009_1185 (size=4695811) 2024-11-28T00:28:02,205 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742009_1185 (size=4695811) 2024-11-28T00:28:02,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742009_1185 (size=4695811) 2024-11-28T00:28:02,256 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742010_1186 (size=232957) 2024-11-28T00:28:02,256 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742010_1186 (size=232957) 2024-11-28T00:28:02,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742010_1186 (size=232957) 2024-11-28T00:28:02,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742011_1187 (size=127628) 2024-11-28T00:28:02,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742011_1187 (size=127628) 2024-11-28T00:28:02,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742011_1187 (size=127628) 2024-11-28T00:28:02,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742012_1188 (size=20406) 2024-11-28T00:28:02,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742012_1188 (size=20406) 2024-11-28T00:28:02,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742012_1188 (size=20406) 2024-11-28T00:28:02,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742013_1189 (size=440957) 2024-11-28T00:28:02,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742013_1189 (size=440957) 2024-11-28T00:28:02,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742013_1189 (size=440957) 2024-11-28T00:28:02,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742014_1190 (size=5175431) 2024-11-28T00:28:02,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742014_1190 (size=5175431) 2024-11-28T00:28:02,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742014_1190 (size=5175431) 2024-11-28T00:28:02,921 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742015_1191 (size=217634) 2024-11-28T00:28:02,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742015_1191 (size=217634) 2024-11-28T00:28:02,922 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742015_1191 (size=217634) 2024-11-28T00:28:02,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742016_1192 (size=6424743) 2024-11-28T00:28:02,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742016_1192 (size=6424743) 2024-11-28T00:28:02,992 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742016_1192 (size=6424743) 2024-11-28T00:28:03,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742017_1193 (size=1832290) 2024-11-28T00:28:03,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742017_1193 (size=1832290) 2024-11-28T00:28:03,045 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742017_1193 (size=1832290) 2024-11-28T00:28:03,064 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742018_1194 (size=322274) 2024-11-28T00:28:03,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742018_1194 (size=322274) 2024-11-28T00:28:03,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742018_1194 (size=322274) 2024-11-28T00:28:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742019_1195 (size=503880) 2024-11-28T00:28:03,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742019_1195 (size=503880) 2024-11-28T00:28:03,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742019_1195 (size=503880) 2024-11-28T00:28:03,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742020_1196 (size=29229) 2024-11-28T00:28:03,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742020_1196 (size=29229) 2024-11-28T00:28:03,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742020_1196 (size=29229) 2024-11-28T00:28:03,231 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742021_1197 (size=24096) 2024-11-28T00:28:03,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742021_1197 (size=24096) 2024-11-28T00:28:03,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742021_1197 (size=24096) 2024-11-28T00:28:03,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742022_1198 (size=111872) 2024-11-28T00:28:03,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742022_1198 (size=111872) 2024-11-28T00:28:03,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742022_1198 (size=111872) 2024-11-28T00:28:03,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742023_1199 (size=45609) 2024-11-28T00:28:03,303 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742023_1199 (size=45609) 2024-11-28T00:28:03,304 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742023_1199 (size=45609) 2024-11-28T00:28:03,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742024_1200 (size=136454) 2024-11-28T00:28:03,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742024_1200 (size=136454) 2024-11-28T00:28:03,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742024_1200 (size=136454) 2024-11-28T00:28:03,326 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:28:03,330 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb-testExportWithResetTtl' hfile list 2024-11-28T00:28:03,333 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.2 K 2024-11-28T00:28:03,333 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:28:03,333 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.7 K 2024-11-28T00:28:03,333 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-28T00:28:03,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742025_1201 (size=995) 2024-11-28T00:28:03,348 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742025_1201 (size=995) 2024-11-28T00:28:03,349 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742025_1201 (size=995) 2024-11-28T00:28:03,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742026_1202 (size=35) 2024-11-28T00:28:03,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742026_1202 (size=35) 2024-11-28T00:28:03,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742026_1202 (size=35) 2024-11-28T00:28:03,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742027_1203 (size=303992) 2024-11-28T00:28:03,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742027_1203 (size=303992) 2024-11-28T00:28:03,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742027_1203 (size=303992) 2024-11-28T00:28:03,417 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:28:03,417 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:28:03,518 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0003_000001 (auth:SIMPLE) from 127.0.0.1:43718 2024-11-28T00:28:03,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:28:05,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-28T00:28:05,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-28T00:28:05,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-28T00:28:05,542 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl Metrics about Tables on a single HBase RegionServer 2024-11-28T00:28:05,543 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithTargetName 2024-11-28T00:28:05,639 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000001/launch_container.sh] 2024-11-28T00:28:05,639 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000001/container_tokens] 2024-11-28T00:28:05,639 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0002/container_1732753513471_0002_01_000001/sysfs] 2024-11-28T00:28:09,110 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 116f5f29dcbbc80eeb96e627af862a3a changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:28:09,110 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 4670378fa01e0ebd58277d5c264c6def changed 
from -1.0 to 0.0, refreshing cache 2024-11-28T00:28:09,110 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region b4d3571a18c0c71be2bd253059ab6da4 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:28:09,110 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 2bfdc119724c45a502c5d7a8528a53e7 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:28:11,048 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:28:12,230 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0003_000001 (auth:SIMPLE) from 127.0.0.1:34714 2024-11-28T00:28:12,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742028_1204 (size=349690) 2024-11-28T00:28:12,728 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742028_1204 (size=349690) 2024-11-28T00:28:12,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742028_1204 (size=349690) 2024-11-28T00:28:14,514 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0003_000001 (auth:SIMPLE) from 127.0.0.1:33838 2024-11-28T00:28:14,514 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0003_000001 (auth:SIMPLE) from 127.0.0.1:40130 2024-11-28T00:28:14,514 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0003_000001 (auth:SIMPLE) from 127.0.0.1:33852 2024-11-28T00:28:15,371 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0003_000001 (auth:SIMPLE) from 127.0.0.1:40132 2024-11-28T00:28:18,302 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0003_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:28:21,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742029_1205 (size=8171) 2024-11-28T00:28:21,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742029_1205 (size=8171) 2024-11-28T00:28:21,515 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742029_1205 (size=8171) 2024-11-28T00:28:23,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742031_1207 (size=14517) 2024-11-28T00:28:23,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742031_1207 (size=14517) 2024-11-28T00:28:23,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742031_1207 (size=14517) 2024-11-28T00:28:23,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742032_1208 (size=5102) 2024-11-28T00:28:23,915 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742032_1208 (size=5102) 2024-11-28T00:28:23,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742032_1208 (size=5102) 2024-11-28T00:28:24,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742033_1209 (size=5878) 2024-11-28T00:28:24,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742033_1209 (size=5878) 2024-11-28T00:28:24,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742033_1209 (size=5878) 2024-11-28T00:28:24,087 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000005/launch_container.sh] 2024-11-28T00:28:24,087 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000005/container_tokens] 2024-11-28T00:28:24,087 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000005/sysfs] 2024-11-28T00:28:24,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742030_1206 (size=31707) 2024-11-28T00:28:24,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742030_1206 (size=31707) 2024-11-28T00:28:24,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742030_1206 (size=31707) 2024-11-28T00:28:24,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742034_1210 (size=462) 2024-11-28T00:28:24,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742034_1210 (size=462) 2024-11-28T00:28:24,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742034_1210 (size=462) 2024-11-28T00:28:24,603 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000004/launch_container.sh] 2024-11-28T00:28:24,603 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000004/container_tokens] 2024-11-28T00:28:24,603 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000004/sysfs] 2024-11-28T00:28:24,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742035_1211 (size=31707) 2024-11-28T00:28:24,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742035_1211 (size=31707) 2024-11-28T00:28:24,623 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742035_1211 (size=31707) 2024-11-28T00:28:24,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742036_1212 (size=349690) 2024-11-28T00:28:24,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742036_1212 (size=349690) 2024-11-28T00:28:24,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742036_1212 (size=349690) 2024-11-28T00:28:24,692 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0003_000001 (auth:SIMPLE) from 127.0.0.1:48750 2024-11-28T00:28:25,760 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:28:25,762 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
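The entries above trace one run of the ExportSnapshot tool: the source snapshot is verified, its manifest is copied to the target under .hbase-snapshot/.tmp, dependency jars are resolved for the MapReduce copy job, the hfile list is split into four chunks, and the exported copy is verified before the export is finalized. A minimal sketch of the equivalent invocation, assuming the standard -snapshot/-copy-to options and reusing the snapshot name and target path recorded in the log (the test harness's actual driver code is not shown here):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // -snapshot / -copy-to mirror what the log records: copy the snapshot manifest
    // to the target filesystem, run the MapReduce copy job for the hfiles, then
    // verify the exported snapshot's expiration status and integrity.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb-testExportWithResetTtl",
        "-copy-to", "hdfs://localhost:43901/user/jenkins/test-data/"
            + "80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094"
    });
    System.exit(rc);
  }
}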
2024-11-28T00:28:25,773 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb-testExportWithResetTtl 2024-11-28T00:28:25,773 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:28:25,774 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:28:25,774 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-28T00:28:25,774 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-28T00:28:25,774 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-28T00:28:25,774 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094/.hbase-snapshot/snaptb-testExportWithResetTtl at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094/.hbase-snapshot/snaptb-testExportWithResetTtl 2024-11-28T00:28:25,775 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094/.hbase-snapshot/snaptb-testExportWithResetTtl/.snapshotinfo 2024-11-28T00:28:25,775 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753680094/.hbase-snapshot/snaptb-testExportWithResetTtl/data.manifest 2024-11-28T00:28:25,783 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testExportWithResetTtl 2024-11-28T00:28:25,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=82, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testExportWithResetTtl 2024-11-28T00:28:25,786 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-28T00:28:25,787 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753705787"}]},"ts":"1732753705787"} 2024-11-28T00:28:25,791 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLING in hbase:meta 2024-11-28T00:28:25,791 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(284): Set testExportWithResetTtl to state=DISABLING 2024-11-28T00:28:25,792 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=83, ppid=82, 
state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl}] 2024-11-28T00:28:25,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, UNASSIGN}, {pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, UNASSIGN}] 2024-11-28T00:28:25,794 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, UNASSIGN 2024-11-28T00:28:25,794 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, UNASSIGN 2024-11-28T00:28:25,795 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=116f5f29dcbbc80eeb96e627af862a3a, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:28:25,795 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=b4d3571a18c0c71be2bd253059ab6da4, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:28:25,798 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=84, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, UNASSIGN because future has completed 2024-11-28T00:28:25,798 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:28:25,799 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=86, ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:28:25,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=85, ppid=83, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, UNASSIGN because future has completed 2024-11-28T00:28:25,800 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:28:25,800 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure b4d3571a18c0c71be2bd253059ab6da4, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:28:25,891 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-28T00:28:25,952 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(122): Close 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:28:25,952 DEBUG 
[RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:28:25,952 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1722): Closing 116f5f29dcbbc80eeb96e627af862a3a, disabling compactions & flushes 2024-11-28T00:28:25,952 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:28:25,952 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:28:25,953 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. after waiting 1 ms 2024-11-28T00:28:25,953 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:28:25,953 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(122): Close b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:28:25,953 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:28:25,953 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1722): Closing b4d3571a18c0c71be2bd253059ab6da4, disabling compactions & flushes 2024-11-28T00:28:25,953 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1755): Closing region testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:28:25,953 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1776): Time limited wait for close lock on testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:28:25,953 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1843): Acquired close lock on testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. after waiting 0 ms 2024-11-28T00:28:25,953 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1853): Updates disabled for region testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 
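The disable request from the client at 00:28:25,783 and the DisableTableProcedure, CloseTableRegionsProcedure, and TransitRegionStateProcedure entries that follow are what a plain Admin disable call produces: each region is unassigned (CLOSING then CLOSED) and the table state in hbase:meta moves from DISABLING to DISABLED. A minimal client-side sketch, assuming a Connection to the test cluster (the connection setup is an assumption, not taken from the log):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DisableTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Triggers the DisableTableProcedure seen above: regions are closed on their
      // region servers and the table is marked DISABLED in hbase:meta.
      admin.disableTable(TableName.valueOf("testExportWithResetTtl"));
    }
  }
}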
2024-11-28T00:28:25,958 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:28:25,958 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:28:25,959 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:28:25,959 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:28:25,959 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1973): Closed testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4. 2024-11-28T00:28:25,959 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1973): Closed testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a. 2024-11-28T00:28:25,959 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] regionserver.HRegion(1676): Region close journal for b4d3571a18c0c71be2bd253059ab6da4: Waiting for close lock at 1732753705953Running coprocessor pre-close hooks at 1732753705953Disabling compacts and flushes for region at 1732753705953Disabling writes for close at 1732753705953Writing region close event to WAL at 1732753705954 (+1 ms)Running coprocessor post-close hooks at 1732753705959 (+5 ms)Closed at 1732753705959 2024-11-28T00:28:25,959 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] regionserver.HRegion(1676): Region close journal for 116f5f29dcbbc80eeb96e627af862a3a: Waiting for close lock at 1732753705952Running coprocessor pre-close hooks at 1732753705952Disabling compacts and flushes for region at 1732753705952Disabling writes for close at 1732753705953 (+1 ms)Writing region close event to WAL at 1732753705953Running coprocessor post-close hooks at 1732753705959 (+6 ms)Closed at 1732753705959 2024-11-28T00:28:25,961 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=86}] handler.UnassignRegionHandler(157): Closed 116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:28:25,962 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=84 updating hbase:meta row=116f5f29dcbbc80eeb96e627af862a3a, regionState=CLOSED 2024-11-28T00:28:25,962 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=87}] handler.UnassignRegionHandler(157): Closed b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:28:25,963 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=85 updating hbase:meta row=b4d3571a18c0c71be2bd253059ab6da4, regionState=CLOSED 2024-11-28T00:28:25,964 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=86, 
ppid=84, state=RUNNABLE, hasLock=false; CloseRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:28:25,966 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=87, ppid=85, state=RUNNABLE, hasLock=false; CloseRegionProcedure b4d3571a18c0c71be2bd253059ab6da4, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:28:25,967 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=86, resume processing ppid=84 2024-11-28T00:28:25,968 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=86, ppid=84, state=SUCCESS, hasLock=false; CloseRegionProcedure 116f5f29dcbbc80eeb96e627af862a3a, server=72c8c7f04009,36427,1732753506332 in 167 msec 2024-11-28T00:28:25,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=87, resume processing ppid=85 2024-11-28T00:28:25,969 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=87, ppid=85, state=SUCCESS, hasLock=false; CloseRegionProcedure b4d3571a18c0c71be2bd253059ab6da4, server=72c8c7f04009,43335,1732753506004 in 167 msec 2024-11-28T00:28:25,970 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=84, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=116f5f29dcbbc80eeb96e627af862a3a, UNASSIGN in 175 msec 2024-11-28T00:28:25,971 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=85, resume processing ppid=83 2024-11-28T00:28:25,971 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=85, ppid=83, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportWithResetTtl, region=b4d3571a18c0c71be2bd253059ab6da4, UNASSIGN in 176 msec 2024-11-28T00:28:25,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=83, resume processing ppid=82 2024-11-28T00:28:25,975 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=83, ppid=82, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testExportWithResetTtl in 179 msec 2024-11-28T00:28:25,977 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753705977"}]},"ts":"1732753705977"} 2024-11-28T00:28:25,979 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-28T00:28:25,979 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testExportWithResetTtl to state=DISABLED 2024-11-28T00:28:25,981 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=82, state=SUCCESS, hasLock=false; DisableTableProcedure table=testExportWithResetTtl in 197 msec 2024-11-28T00:28:26,101 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=82 2024-11-28T00:28:26,101 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testExportWithResetTtl completed 2024-11-28T00:28:26,102 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testExportWithResetTtl 2024-11-28T00:28:26,103 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:28:26,105 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=88, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:28:26,105 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testExportWithResetTtl 2024-11-28T00:28:26,107 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=88, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:28:26,110 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testExportWithResetTtl 2024-11-28T00:28:26,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:28:26,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:28:26,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:28:26,117 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:28:26,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-28T00:28:26,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-28T00:28:26,118 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-28T00:28:26,119 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportWithResetTtl with data PBUF 2024-11-28T00:28:26,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:28:26,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:26,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:28:26,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:28:26,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:26,121 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:26,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testExportWithResetTtl 2024-11-28T00:28:26,122 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:26,123 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-11-28T00:28:26,124 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:26,125 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:28:26,125 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:26,125 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:26,126 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithResetTtl \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:26,128 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/recovered.edits] 2024-11-28T00:28:26,133 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:28:26,137 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/recovered.edits] 2024-11-28T00:28:26,139 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/cf/6d2ea29143db4f1db1af8b217a6fc479 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/cf/6d2ea29143db4f1db1af8b217a6fc479 2024-11-28T00:28:26,144 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/recovered.edits/8.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a/recovered.edits/8.seqid 2024-11-28T00:28:26,145 DEBUG [HFileArchiver-8 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:28:26,147 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/cf/1becbf6f5b6b40b9aa940e2ea5842b7f to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/cf/1becbf6f5b6b40b9aa940e2ea5842b7f 2024-11-28T00:28:26,155 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/recovered.edits/8.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4/recovered.edits/8.seqid 2024-11-28T00:28:26,156 DEBUG [HFileArchiver-9 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportWithResetTtl/b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:28:26,156 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(313): Archived testExportWithResetTtl regions 2024-11-28T00:28:26,157 DEBUG [PEWorker-1 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-28T00:28:26,158 DEBUG [PEWorker-1 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf] 2024-11-28T00:28:26,163 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128c95b2d739f4a4a25bb8689d8554cfcbb_b4d3571a18c0c71be2bd253059ab6da4 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/c4ca4238a0b923820dcc509a6f75849b20241128c95b2d739f4a4a25bb8689d8554cfcbb_b4d3571a18c0c71be2bd253059ab6da4 2024-11-28T00:28:26,165 DEBUG [PEWorker-1 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128c77b4726c27241b88911e87c7fca9593_116f5f29dcbbc80eeb96e627af862a3a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d/cf/d41d8cd98f00b204e9800998ecf8427e20241128c77b4726c27241b88911e87c7fca9593_116f5f29dcbbc80eeb96e627af862a3a 2024-11-28T00:28:26,165 DEBUG [PEWorker-1 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportWithResetTtl/ea1f2913460bad1dc9f5d962c597c09d 2024-11-28T00:28:26,168 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=88, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:28:26,173 WARN [PEWorker-1 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testExportWithResetTtl from hbase:meta 2024-11-28T00:28:26,178 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(407): Removing 'testExportWithResetTtl' descriptor. 2024-11-28T00:28:26,179 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=88, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:28:26,179 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(397): Removing 'testExportWithResetTtl' from region states. 
2024-11-28T00:28:26,180 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753706180"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:26,181 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753706180"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:26,185 INFO [PEWorker-1 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:28:26,185 DEBUG [PEWorker-1 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 116f5f29dcbbc80eeb96e627af862a3a, NAME => 'testExportWithResetTtl,,1732753678123.116f5f29dcbbc80eeb96e627af862a3a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => b4d3571a18c0c71be2bd253059ab6da4, NAME => 'testExportWithResetTtl,1,1732753678123.b4d3571a18c0c71be2bd253059ab6da4.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:28:26,185 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(401): Marking 'testExportWithResetTtl' as deleted. 2024-11-28T00:28:26,185 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753706185"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:26,188 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(867): Deleted table testExportWithResetTtl state from META 2024-11-28T00:28:26,189 DEBUG [PEWorker-1 {}] procedure.DeleteTableProcedure(133): Finished pid=88, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testExportWithResetTtl 2024-11-28T00:28:26,196 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=88, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testExportWithResetTtl in 87 msec 2024-11-28T00:28:26,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=88 2024-11-28T00:28:26,232 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testExportWithResetTtl 2024-11-28T00:28:26,232 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testExportWithResetTtl completed 2024-11-28T00:28:26,233 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithResetTtl 2024-11-28T00:28:26,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=89, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:28:26,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-28T00:28:26,237 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753706237"}]},"ts":"1732753706237"} 2024-11-28T00:28:26,240 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLING in hbase:meta 
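The DELETE of testExportWithResetTtl that completes above and the DISABLE/DELETE of testtb-testExportWithResetTtl that follow are issued by the test client through the HBase Admin API (the log shows client.RawAsyncHBaseAdmin reporting the operations as completed). A minimal sketch of the equivalent synchronous client calls is shown below; the class name and the use of the blocking Admin are illustrative assumptions, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class DropTestTable {
        public static void main(String[] args) throws Exception {
            Configuration conf = HBaseConfiguration.create();
            try (Connection conn = ConnectionFactory.createConnection(conf);
                 Admin admin = conn.getAdmin()) {
                TableName tn = TableName.valueOf("testtb-testExportWithResetTtl");
                if (admin.tableExists(tn)) {
                    // A table must be disabled before it can be deleted; each call
                    // blocks until the corresponding master procedure finishes
                    // (DisableTableProcedure / DeleteTableProcedure in this log).
                    if (admin.isTableEnabled(tn)) {
                        admin.disableTable(tn);
                    }
                    admin.deleteTable(tn);
                }
            }
        }
    }
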
2024-11-28T00:28:26,240 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithResetTtl to state=DISABLING 2024-11-28T00:28:26,241 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=90, ppid=89, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl}] 2024-11-28T00:28:26,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, UNASSIGN}, {pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, UNASSIGN}] 2024-11-28T00:28:26,245 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, UNASSIGN 2024-11-28T00:28:26,245 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, UNASSIGN 2024-11-28T00:28:26,246 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=4670378fa01e0ebd58277d5c264c6def, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:28:26,246 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=2bfdc119724c45a502c5d7a8528a53e7, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:28:26,253 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=92, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, UNASSIGN because future has completed 2024-11-28T00:28:26,253 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:28:26,253 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4670378fa01e0ebd58277d5c264c6def, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:28:26,254 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=91, ppid=90, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, UNASSIGN because future has completed 2024-11-28T00:28:26,255 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:28:26,255 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:28:26,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-28T00:28:26,408 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(122): Close 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:28:26,408 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:28:26,408 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1722): Closing 4670378fa01e0ebd58277d5c264c6def, disabling compactions & flushes 2024-11-28T00:28:26,408 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:28:26,408 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:28:26,408 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. after waiting 0 ms 2024-11-28T00:28:26,408 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 2024-11-28T00:28:26,409 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(122): Close 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:28:26,409 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:28:26,409 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1722): Closing 2bfdc119724c45a502c5d7a8528a53e7, disabling compactions & flushes 2024-11-28T00:28:26,409 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1755): Closing region testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:28:26,409 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:28:26,409 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. after waiting 0 ms 2024-11-28T00:28:26,409 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 
2024-11-28T00:28:26,435 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:28:26,436 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:28:26,436 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7. 2024-11-28T00:28:26,436 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] regionserver.HRegion(1676): Region close journal for 2bfdc119724c45a502c5d7a8528a53e7: Waiting for close lock at 1732753706409Running coprocessor pre-close hooks at 1732753706409Disabling compacts and flushes for region at 1732753706409Disabling writes for close at 1732753706409Writing region close event to WAL at 1732753706429 (+20 ms)Running coprocessor post-close hooks at 1732753706436 (+7 ms)Closed at 1732753706436 2024-11-28T00:28:26,440 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=94}] handler.UnassignRegionHandler(157): Closed 2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:28:26,441 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=91 updating hbase:meta row=2bfdc119724c45a502c5d7a8528a53e7, regionState=CLOSED 2024-11-28T00:28:26,443 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=94, ppid=91, state=RUNNABLE, hasLock=false; CloseRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:28:26,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=94, resume processing ppid=91 2024-11-28T00:28:26,447 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=94, ppid=91, state=SUCCESS, hasLock=false; CloseRegionProcedure 2bfdc119724c45a502c5d7a8528a53e7, server=72c8c7f04009,43335,1732753506004 in 190 msec 2024-11-28T00:28:26,450 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=91, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=2bfdc119724c45a502c5d7a8528a53e7, UNASSIGN in 203 msec 2024-11-28T00:28:26,452 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:28:26,453 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:28:26,453 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1973): Closed testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def. 
2024-11-28T00:28:26,453 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] regionserver.HRegion(1676): Region close journal for 4670378fa01e0ebd58277d5c264c6def: Waiting for close lock at 1732753706408Running coprocessor pre-close hooks at 1732753706408Disabling compacts and flushes for region at 1732753706408Disabling writes for close at 1732753706408Writing region close event to WAL at 1732753706426 (+18 ms)Running coprocessor post-close hooks at 1732753706453 (+27 ms)Closed at 1732753706453 2024-11-28T00:28:26,455 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=93}] handler.UnassignRegionHandler(157): Closed 4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:28:26,455 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=92 updating hbase:meta row=4670378fa01e0ebd58277d5c264c6def, regionState=CLOSED 2024-11-28T00:28:26,458 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=93, ppid=92, state=RUNNABLE, hasLock=false; CloseRegionProcedure 4670378fa01e0ebd58277d5c264c6def, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:28:26,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=93, resume processing ppid=92 2024-11-28T00:28:26,462 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=93, ppid=92, state=SUCCESS, hasLock=false; CloseRegionProcedure 4670378fa01e0ebd58277d5c264c6def, server=72c8c7f04009,41621,1732753506207 in 207 msec 2024-11-28T00:28:26,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=92, resume processing ppid=90 2024-11-28T00:28:26,464 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=92, ppid=90, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithResetTtl, region=4670378fa01e0ebd58277d5c264c6def, UNASSIGN in 218 msec 2024-11-28T00:28:26,468 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=90, resume processing ppid=89 2024-11-28T00:28:26,468 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=90, ppid=89, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithResetTtl in 225 msec 2024-11-28T00:28:26,470 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753706470"}]},"ts":"1732753706470"} 2024-11-28T00:28:26,473 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithResetTtl, state=DISABLED in hbase:meta 2024-11-28T00:28:26,473 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithResetTtl to state=DISABLED 2024-11-28T00:28:26,476 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=89, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithResetTtl in 241 msec 2024-11-28T00:28:26,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=89 2024-11-28T00:28:26,552 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:28:26,552 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] 
master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithResetTtl 2024-11-28T00:28:26,554 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:28:26,555 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=95, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:28:26,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithResetTtl 2024-11-28T00:28:26,557 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=95, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:28:26,567 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithResetTtl 2024-11-28T00:28:26,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:28:26,572 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:28:26,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:28:26,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:28:26,574 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data PBUF 2024-11-28T00:28:26,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-11-28T00:28:26,576 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-28T00:28:26,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:26,576 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:26,576 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 
2024-11-28T00:28:26,576 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-28T00:28:26,577 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:26,577 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithResetTtl with data null 2024-11-28T00:28:26,577 INFO [zk-permission-watcher-pool-0 {}] access.AuthManager(136): Skipping permission cache refresh because writable data is empty 2024-11-28T00:28:26,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithResetTtl 2024-11-28T00:28:26,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:26,579 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-28T00:28:26,584 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:28:26,588 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:28:26,591 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/recovered.edits] 2024-11-28T00:28:26,596 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/cf/1f3a4fb716e84263a959d0f05805b922 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/cf/1f3a4fb716e84263a959d0f05805b922 2024-11-28T00:28:26,598 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/recovered.edits] 2024-11-28T00:28:26,607 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7/recovered.edits/9.seqid 2024-11-28T00:28:26,608 DEBUG [HFileArchiver-10 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:28:26,610 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/cf/0b6c174558fb40189434465c9caad6b9 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/cf/0b6c174558fb40189434465c9caad6b9 2024-11-28T00:28:26,614 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def/recovered.edits/9.seqid 2024-11-28T00:28:26,615 DEBUG [HFileArchiver-11 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithResetTtl/4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:28:26,615 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithResetTtl regions 2024-11-28T00:28:26,616 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-28T00:28:26,617 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf] 2024-11-28T00:28:26,622 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411280463646844aa41ce8ad9d050211b78d9_4670378fa01e0ebd58277d5c264c6def to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/c4ca4238a0b923820dcc509a6f75849b202411280463646844aa41ce8ad9d050211b78d9_4670378fa01e0ebd58277d5c264c6def 2024-11-28T00:28:26,624 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411286e1a449f9b0e444cba7230cf36027377_2bfdc119724c45a502c5d7a8528a53e7 to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e/cf/d41d8cd98f00b204e9800998ecf8427e202411286e1a449f9b0e444cba7230cf36027377_2bfdc119724c45a502c5d7a8528a53e7 2024-11-28T00:28:26,624 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithResetTtl/1bf72cc28ee4e494fb2ee93a7ed9fe6e 2024-11-28T00:28:26,628 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=95, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:28:26,632 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithResetTtl from hbase:meta 2024-11-28T00:28:26,635 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithResetTtl' descriptor. 2024-11-28T00:28:26,640 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=95, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:28:26,640 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithResetTtl' from region states. 2024-11-28T00:28:26,640 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753706640"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:26,641 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753706640"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:26,647 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:28:26,647 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 2bfdc119724c45a502c5d7a8528a53e7, NAME => 'testtb-testExportWithResetTtl,,1732753676430.2bfdc119724c45a502c5d7a8528a53e7.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 4670378fa01e0ebd58277d5c264c6def, NAME => 'testtb-testExportWithResetTtl,1,1732753676430.4670378fa01e0ebd58277d5c264c6def.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:28:26,647 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithResetTtl' as deleted. 
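Once the table itself is gone, the master handles three snapshot deletions (emptySnaptb0-, snaptb- and snaptb0-testExportWithResetTtl; see the SnapshotManager entries just below). A hedged sketch of the corresponding client calls, assuming an Admin handle obtained as in the earlier sketch:

    // Assumes 'admin' is an org.apache.hadoop.hbase.client.Admin obtained as in
    // the earlier sketch; the snapshot names are taken from the log entries below.
    static void dropTestSnapshots(org.apache.hadoop.hbase.client.Admin admin)
            throws java.io.IOException {
        admin.deleteSnapshot("emptySnaptb0-testExportWithResetTtl");
        admin.deleteSnapshot("snaptb-testExportWithResetTtl");
        admin.deleteSnapshot("snaptb0-testExportWithResetTtl");
    }
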
2024-11-28T00:28:26,647 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithResetTtl","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753706647"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:26,650 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithResetTtl state from META 2024-11-28T00:28:26,651 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=95, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithResetTtl 2024-11-28T00:28:26,653 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=95, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithResetTtl in 99 msec 2024-11-28T00:28:26,691 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=95 2024-11-28T00:28:26,692 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithResetTtl 2024-11-28T00:28:26,692 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithResetTtl completed 2024-11-28T00:28:26,704 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithResetTtl" type: DISABLED 2024-11-28T00:28:26,706 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithResetTtl 2024-11-28T00:28:26,708 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb-testExportWithResetTtl" type: DISABLED 2024-11-28T00:28:26,711 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb-testExportWithResetTtl 2024-11-28T00:28:26,713 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithResetTtl" type: DISABLED 2024-11-28T00:28:26,715 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithResetTtl 2024-11-28T00:28:26,760 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithResetTtl Thread=788 (was 788), OpenFileDescriptor=790 (was 793), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=792 (was 614) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 15) - ProcessCount LEAK? 
-, AvailableMemoryMB=3772 (was 4044) 2024-11-28T00:28:26,760 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-28T00:28:26,801 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=788, OpenFileDescriptor=790, MaxFileDescriptor=1048576, SystemLoadAverage=792, ProcessCount=17, AvailableMemoryMB=3766 2024-11-28T00:28:26,801 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=788 is superior to 500 2024-11-28T00:28:26,804 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:28:26,807 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:28:26,809 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:28:26,809 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemState" procId is: 96 2024-11-28T00:28:26,811 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:28:26,811 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:28:26,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742037_1213 (size=443) 2024-11-28T00:28:26,837 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742037_1213 (size=443) 2024-11-28T00:28:26,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742037_1213 (size=443) 2024-11-28T00:28:26,840 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 15dc58c251b5f06d0c462511cdce82d0, NAME => 'testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', 
REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:26,853 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 8be72578f6f15976c5529108ee2f7776, NAME => 'testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:26,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742038_1214 (size=68) 2024-11-28T00:28:26,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742038_1214 (size=68) 2024-11-28T00:28:26,886 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742038_1214 (size=68) 2024-11-28T00:28:26,887 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:28:26,888 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing 15dc58c251b5f06d0c462511cdce82d0, disabling compactions & flushes 2024-11-28T00:28:26,888 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:26,888 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:26,888 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. after waiting 0 ms 2024-11-28T00:28:26,888 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:26,888 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 
2024-11-28T00:28:26,888 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for 15dc58c251b5f06d0c462511cdce82d0: Waiting for close lock at 1732753706888Disabling compacts and flushes for region at 1732753706888Disabling writes for close at 1732753706888Writing region close event to WAL at 1732753706888Closed at 1732753706888 2024-11-28T00:28:26,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742039_1215 (size=68) 2024-11-28T00:28:26,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742039_1215 (size=68) 2024-11-28T00:28:26,894 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742039_1215 (size=68) 2024-11-28T00:28:26,894 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:28:26,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 8be72578f6f15976c5529108ee2f7776, disabling compactions & flushes 2024-11-28T00:28:26,895 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:26,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:26,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. after waiting 0 ms 2024-11-28T00:28:26,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:26,895 INFO [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 
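The create request above spells out the descriptor for testtb-testExportFileSystemState: a single 'cf' family with IS_MOB => 'true' and MOB_THRESHOLD => '0', one version, and a split at key '1' that produces the two regions being initialized here. A sketch of how such a descriptor could be built with the 2.x client API follows; the helper method name and the Admin handle are assumptions for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    static void createMobTestTable(Admin admin) throws java.io.IOException {
        // MOB_THRESHOLD => '0' forces every cell in 'cf' into MOB files, which is
        // why mobdir/... paths appear when the table is later archived.
        ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)
            .setMobThreshold(0L)
            .setMaxVersions(1)
            .build();
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
            .setColumnFamily(cf)
            .build();
        // One split key ("1") yields the two regions seen above: ''..'1' and '1'..''.
        admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
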
2024-11-28T00:28:26,895 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 8be72578f6f15976c5529108ee2f7776: Waiting for close lock at 1732753706895Disabling compacts and flushes for region at 1732753706895Disabling writes for close at 1732753706895Writing region close event to WAL at 1732753706895Closed at 1732753706895 2024-11-28T00:28:26,896 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:28:26,897 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732753706896"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753706896"}]},"ts":"1732753706896"} 2024-11-28T00:28:26,897 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732753706896"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753706896"}]},"ts":"1732753706896"} 2024-11-28T00:28:26,899 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:28:26,900 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:28:26,901 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753706901"}]},"ts":"1732753706901"} 2024-11-28T00:28:26,903 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLING in hbase:meta 2024-11-28T00:28:26,904 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:28:26,905 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:28:26,905 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:28:26,905 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:28:26,905 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:28:26,905 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:28:26,905 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:28:26,905 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:28:26,905 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:28:26,905 INFO [PEWorker-1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:28:26,905 DEBUG [PEWorker-1 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:28:26,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, ASSIGN}, {pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, ASSIGN}] 2024-11-28T00:28:26,910 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, ASSIGN 2024-11-28T00:28:26,911 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, ASSIGN 2024-11-28T00:28:26,912 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:28:26,913 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:28:26,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:28:27,044 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000003/launch_container.sh] 2024-11-28T00:28:27,044 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000003/container_tokens] 2024-11-28T00:28:27,044 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000003/sysfs] 2024-11-28T00:28:27,063 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:28:27,064 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=15dc58c251b5f06d0c462511cdce82d0, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:28:27,064 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=8be72578f6f15976c5529108ee2f7776, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:28:27,080 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=98, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, ASSIGN because future has completed 2024-11-28T00:28:27,080 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=99, ppid=98, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8be72578f6f15976c5529108ee2f7776, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:28:27,083 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=97, ppid=96, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, ASSIGN because future has completed 2024-11-28T00:28:27,083 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15dc58c251b5f06d0c462511cdce82d0, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:28:27,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:28:27,237 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:27,237 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7752): Opening region: {ENCODED => 8be72578f6f15976c5529108ee2f7776, NAME => 'testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:28:27,238 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. service=AccessControlService 2024-11-28T00:28:27,238 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:28:27,238 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,238 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:28:27,238 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7794): checking encryption for 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,239 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(7797): checking classloading for 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,240 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:27,240 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7752): Opening region: {ENCODED => 15dc58c251b5f06d0c462511cdce82d0, NAME => 'testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:28:27,240 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. service=AccessControlService 2024-11-28T00:28:27,241 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:28:27,241 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemState 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,241 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:28:27,241 INFO [StoreOpener-8be72578f6f15976c5529108ee2f7776-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,241 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7794): checking encryption for 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,241 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(7797): checking classloading for 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,243 INFO [StoreOpener-8be72578f6f15976c5529108ee2f7776-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8be72578f6f15976c5529108ee2f7776 columnFamilyName cf 2024-11-28T00:28:27,244 DEBUG [StoreOpener-8be72578f6f15976c5529108ee2f7776-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:27,245 INFO [StoreOpener-8be72578f6f15976c5529108ee2f7776-1 {}] regionserver.HStore(327): Store=8be72578f6f15976c5529108ee2f7776/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:28:27,245 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1038): replaying wal for 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,246 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,247 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,247 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1048): stopping wal replay for 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,247 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1060): Cleaning up temporary data for 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,250 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1093): writing seq id for 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,252 INFO [StoreOpener-15dc58c251b5f06d0c462511cdce82d0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,255 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:28:27,255 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1114): Opened 8be72578f6f15976c5529108ee2f7776; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75118755, jitterRate=0.1193566769361496}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:28:27,256 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,257 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegion(1006): Region open journal for 8be72578f6f15976c5529108ee2f7776: Running coprocessor pre-open hook at 1732753707239Writing region info on filesystem at 1732753707239Initializing all the Stores at 1732753707241 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753707241Cleaning up temporary data from old regions at 1732753707247 (+6 ms)Running coprocessor post-open hooks at 1732753707256 (+9 ms)Region opened successfully at 1732753707256 2024-11-28T00:28:27,258 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776., pid=99, masterSystemTime=1732753707233 2024-11-28T00:28:27,259 INFO [StoreOpener-15dc58c251b5f06d0c462511cdce82d0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, 
offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 15dc58c251b5f06d0c462511cdce82d0 columnFamilyName cf 2024-11-28T00:28:27,261 DEBUG [StoreOpener-15dc58c251b5f06d0c462511cdce82d0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:27,262 INFO [StoreOpener-15dc58c251b5f06d0c462511cdce82d0-1 {}] regionserver.HStore(327): Store=15dc58c251b5f06d0c462511cdce82d0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:28:27,262 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:27,262 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=99}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:27,262 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1038): replaying wal for 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,262 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=98 updating hbase:meta row=8be72578f6f15976c5529108ee2f7776, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:28:27,264 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,264 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,265 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1048): stopping wal replay for 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,265 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1060): Cleaning up temporary data for 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,267 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1093): writing seq id for 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=99, ppid=98, state=RUNNABLE, 
hasLock=false; OpenRegionProcedure 8be72578f6f15976c5529108ee2f7776, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:28:27,277 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=99, resume processing ppid=98 2024-11-28T00:28:27,277 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=99, ppid=98, state=SUCCESS, hasLock=false; OpenRegionProcedure 8be72578f6f15976c5529108ee2f7776, server=72c8c7f04009,41621,1732753506207 in 195 msec 2024-11-28T00:28:27,279 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=98, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, ASSIGN in 371 msec 2024-11-28T00:28:27,288 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:28:27,289 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1114): Opened 15dc58c251b5f06d0c462511cdce82d0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60820749, jitterRate=-0.09370021522045135}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:28:27,289 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,289 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegion(1006): Region open journal for 15dc58c251b5f06d0c462511cdce82d0: Running coprocessor pre-open hook at 1732753707241Writing region info on filesystem at 1732753707241Initializing all the Stores at 1732753707242 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753707242Cleaning up temporary data from old regions at 1732753707265 (+23 ms)Running coprocessor post-open hooks at 1732753707289 (+24 ms)Region opened successfully at 1732753707289 2024-11-28T00:28:27,290 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0., pid=100, masterSystemTime=1732753707236 2024-11-28T00:28:27,293 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:27,293 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=100}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 
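The region-open journal above records the column family descriptor actually used for 'cf' (VERSIONS => '1', IS_MOB => 'true', MOB_THRESHOLD => '0', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB) and the two regions split at key '1'. As a hedged illustration only — this is a reconstruction from the logged descriptor, not the test's actual setup code — a table with an equivalent schema could be declared through the standard HBase 2.x client builders:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateExportTestTable {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' mirroring the descriptor in the region-open journal:
      // one version, MOB enabled with threshold 0, ROW bloom filter, 64 KB blocks.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setBloomFilterType(BloomType.ROW)
          .setBlocksize(64 * 1024)
          .build();
      // One split key '1' yields the two regions seen above
      // (STARTKEY '' -> '1' and STARTKEY '1' -> '').
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(
          TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportFileSystemState"))
              .setColumnFamily(cf)
              .build(),
          splitKeys);
    }
  }
}
```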
2024-11-28T00:28:27,296 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=97 updating hbase:meta row=15dc58c251b5f06d0c462511cdce82d0, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:28:27,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=100, ppid=97, state=RUNNABLE, hasLock=false; OpenRegionProcedure 15dc58c251b5f06d0c462511cdce82d0, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:28:27,302 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=100, resume processing ppid=97 2024-11-28T00:28:27,303 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=100, ppid=97, state=SUCCESS, hasLock=false; OpenRegionProcedure 15dc58c251b5f06d0c462511cdce82d0, server=72c8c7f04009,43335,1732753506004 in 217 msec 2024-11-28T00:28:27,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=97, resume processing ppid=96 2024-11-28T00:28:27,305 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=97, ppid=96, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, ASSIGN in 397 msec 2024-11-28T00:28:27,306 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:28:27,306 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753707306"}]},"ts":"1732753707306"} 2024-11-28T00:28:27,308 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=ENABLED in hbase:meta 2024-11-28T00:28:27,310 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=96, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:28:27,310 DEBUG [PEWorker-4 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemState jenkins: RWXCA 2024-11-28T00:28:27,314 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:28:27,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:27,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:27,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:27,317 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper 
Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:27,319 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:27,319 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:27,320 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:27,320 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:27,322 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=96, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemState in 516 msec 2024-11-28T00:28:27,441 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=96 2024-11-28T00:28:27,441 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:28:27,442 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:28:27,446 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-28T00:28:27,446 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 
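HBaseTestingUtil reports two regions for the newly created table. A minimal client-side sketch of how that region count and the assignments chosen by the balancer can be confirmed; this uses the standard RegionLocator API and is not the testing utility's own implementation:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ListTestTableRegions {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemState");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         RegionLocator locator = conn.getRegionLocator(table)) {
      List<HRegionLocation> locations = locator.getAllRegionLocations();
      System.out.println("Found " + locations.size() + " regions for " + table);
      for (HRegionLocation loc : locations) {
        // Encoded region name plus the hosting region server, e.g.
        // 15dc58c251b5f06d0c462511cdce82d0 -> 72c8c7f04009,43335,1732753506004
        System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
      }
    }
  }
}
```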
2024-11-28T00:28:27,446 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:28:27,448 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:28:27,455 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:28:27,464 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:28:27,468 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:28:27,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753707468 (current time:1732753707468). 2024-11-28T00:28:27,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:28:27,468 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-28T00:28:27,469 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:28:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@75afe10b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:27,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:28:27,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:28:27,474 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:28:27,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:28:27,474 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:28:27,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4292da9c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, 
fallbackAllowed=true, bind address=null 2024-11-28T00:28:27,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:28:27,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:28:27,475 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:27,476 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55214, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:28:27,477 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7ff78bda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:27,478 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:28:27,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:28:27,479 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:27,481 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33660, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:27,483 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:28:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:28:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:27,483 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:27,483 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:28:27,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2a4daf15, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:27,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:28:27,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:28:27,489 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:28:27,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:28:27,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:28:27,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4af35eed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:27,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:28:27,490 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:28:27,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:27,491 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55222, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:28:27,492 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@79f96a2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:27,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:28:27,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:28:27,495 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:27,497 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:27,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:28:27,499 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:27,501 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51806, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:27,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:28:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:28:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:27,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:27,502 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:28:27,503 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:28:27,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
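With the ACL check done, the master is about to store the SnapshotProcedure for emptySnaptb0-testExportFileSystemState (type=FLUSH, ttl=0). A hedged sketch of the client call that issues such a request; Admin.snapshot with SnapshotType.FLUSH is the standard HBase 2.x API, though the test's own snapshot helper may wrap it differently:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeEmptySnapshot {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure completes, which matches
      // the repeated "Checking to see if procedure is done" polling in the log.
      admin.snapshot("emptySnaptb0-testExportFileSystemState",
          TableName.valueOf("testtb-testExportFileSystemState"),
          SnapshotType.FLUSH);
    }
  }
}
```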
2024-11-28T00:28:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:28:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-28T00:28:27,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-28T00:28:27,509 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:28:27,511 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:28:27,514 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:28:27,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742040_1216 (size=170) 2024-11-28T00:28:27,529 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742040_1216 (size=170) 2024-11-28T00:28:27,530 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742040_1216 (size=170) 2024-11-28T00:28:27,533 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:28:27,533 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15dc58c251b5f06d0c462511cdce82d0}, {pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8be72578f6f15976c5529108ee2f7776}] 2024-11-28T00:28:27,535 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,535 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,623 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-28T00:28:27,688 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=102 2024-11-28T00:28:27,688 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.HRegion(2603): Flush status journal for 15dc58c251b5f06d0c462511cdce82d0: 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. for emptySnaptb0-testExportFileSystemState completed. 2024-11-28T00:28:27,689 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=103 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.' region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.HRegion(2603): Flush status journal for 8be72578f6f15976c5529108ee2f7776: 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. for emptySnaptb0-testExportFileSystemState completed. 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemState 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:28:27,689 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:28:27,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742042_1218 (size=71) 2024-11-28T00:28:27,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742042_1218 (size=71) 2024-11-28T00:28:27,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742042_1218 (size=71) 2024-11-28T00:28:27,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:27,706 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=102}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=102 2024-11-28T00:28:27,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=102 2024-11-28T00:28:27,707 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,707 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=102, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:27,712 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=102, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 15dc58c251b5f06d0c462511cdce82d0 in 175 msec 2024-11-28T00:28:27,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742041_1217 (size=71) 2024-11-28T00:28:27,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742041_1217 (size=71) 2024-11-28T00:28:27,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 
2024-11-28T00:28:27,734 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=103}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=103 2024-11-28T00:28:27,734 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742041_1217 (size=71) 2024-11-28T00:28:27,735 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=103 2024-11-28T00:28:27,735 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemState on region 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,735 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=103, ppid=101, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:27,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=103, resume processing ppid=101 2024-11-28T00:28:27,743 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:28:27,743 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=103, ppid=101, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8be72578f6f15976c5529108ee2f7776 in 203 msec 2024-11-28T00:28:27,745 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:28:27,752 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:28:27,752 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:28:27,752 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:27,753 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:28:27,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742043_1219 (size=63) 2024-11-28T00:28:27,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742043_1219 (size=63) 2024-11-28T00:28:27,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742043_1219 (size=63) 2024-11-28T00:28:27,773 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:28:27,773 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemState 2024-11-28T00:28:27,773 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState 2024-11-28T00:28:27,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742044_1220 (size=653) 2024-11-28T00:28:27,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742044_1220 (size=653) 2024-11-28T00:28:27,813 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742044_1220 (size=653) 2024-11-28T00:28:27,826 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:28:27,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-28T00:28:27,849 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:28:27,850 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemState to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testExportFileSystemState 2024-11-28T00:28:27,854 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=101, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:28:27,854 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 101 2024-11-28T00:28:27,856 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=101, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=101, snapshot={ ss=emptySnaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 350 msec 2024-11-28T00:28:28,142 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=101 2024-11-28T00:28:28,142 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:28:28,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:28:28,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41621 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:28:28,157 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:28:28,161 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemState 2024-11-28T00:28:28,161 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 
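After the empty snapshot completes, both regions are written with the WAL disabled ("Data may be lost in the event of a crash"). A minimal sketch of a Put issued with Durability.SKIP_WAL, which takes exactly this warning path on the region server; the row key and value below are placeholders, not the test's actual data:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class WriteWithoutWal {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table table = conn.getTable(TableName.valueOf("testtb-testExportFileSystemState"))) {
      Put put = new Put(Bytes.toBytes("row-0"));   // placeholder row key
      put.addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
      put.setDurability(Durability.SKIP_WAL);      // skip the write-ahead log for this mutation
      table.put(put);
    }
  }
}
```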
2024-11-28T00:28:28,161 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:28:28,163 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:28:28,170 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:28:28,178 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemState,, stopping at row=testtb-testExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:28:28,182 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:28:28,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753708182 (current time:1732753708182). 2024-11-28T00:28:28,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:28:28,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemState VERSION not specified, setting to 2 2024-11-28T00:28:28,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:28:28,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@cb8e156, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:28,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:28:28,183 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:28:28,184 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:28:28,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:28:28,184 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:28:28,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2ef409b4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-28T00:28:28,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:28:28,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:28:28,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:28,186 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55246, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:28:28,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b021a30, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:28,187 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:28:28,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:28:28,188 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:28,189 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33680, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:28,190 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:28:28,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:28:28,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:28,190 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:28,190 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:28:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46634597, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:28:28,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:28:28,192 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:28:28,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:28:28,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:28:28,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5db54389, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:28,193 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:28:28,194 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:28:28,195 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55262, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:28:28,195 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:28,196 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e7ebd80, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:28,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:28:28,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:28:28,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:28,200 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33682, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:28,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:28:28,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:28,204 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51812, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:28,205 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:28:28,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:28:28,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:28,205 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:28,205 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:28:28,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:28:28,207 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
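At this point the master has validated the snapshot description and read the table ACL; the pid=104 SnapshotProcedure that follows is created in response to a client snapshot call. A minimal sketch of that client side, assuming the standard Admin API (the snapshot and table names are taken from the log, everything else is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.SnapshotType;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // type=FLUSH in the log: each region is flushed before its hfiles are referenced.
          admin.snapshot("snaptb0-testExportFileSystemState",
              TableName.valueOf("testtb-testExportFileSystemState"),
              SnapshotType.FLUSH);
        }
      }
    }

The call returns once the master-side procedure reaches SUCCESS, which is consistent with the repeated "Checking to see if procedure is done pid=104" polls below.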
2024-11-28T00:28:28,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:28:28,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-28T00:28:28,209 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-28T00:28:28,210 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:28:28,211 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:28:28,213 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:28:28,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742045_1221 (size=165) 2024-11-28T00:28:28,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742045_1221 (size=165) 2024-11-28T00:28:28,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742045_1221 (size=165) 2024-11-28T00:28:28,256 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:28:28,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15dc58c251b5f06d0c462511cdce82d0}, {pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8be72578f6f15976c5529108ee2f7776}] 2024-11-28T00:28:28,258 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:28,258 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:28,321 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-28T00:28:28,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=105 2024-11-28T00:28:28,410 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=106 2024-11-28T00:28:28,410 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:28,411 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:28,411 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2902): Flushing 15dc58c251b5f06d0c462511cdce82d0 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:28:28,411 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2902): Flushing 8be72578f6f15976c5529108ee2f7776 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:28:28,441 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280af19db63ce740d9b828ad40cd262d4a_15dc58c251b5f06d0c462511cdce82d0 is 71, key is 017c632746392944c8f0551dd8ac52fd/cf:q/1732753708152/Put/seqid=0 2024-11-28T00:28:28,444 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411289e8d373f4b6c40c89eb3e37206112510_8be72578f6f15976c5529108ee2f7776 is 71, key is 1791464912a48490bd8b3df644dbdbe4/cf:q/1732753708155/Put/seqid=0 2024-11-28T00:28:28,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742046_1222 (size=5102) 2024-11-28T00:28:28,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742046_1222 (size=5102) 2024-11-28T00:28:28,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742046_1222 (size=5102) 2024-11-28T00:28:28,455 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:28,462 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] 
regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411280af19db63ce740d9b828ad40cd262d4a_15dc58c251b5f06d0c462511cdce82d0 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411280af19db63ce740d9b828ad40cd262d4a_15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:28,463 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/.tmp/cf/4fb8f84cc3264410ba3490d803f04760, store: [table=testtb-testExportFileSystemState family=cf region=15dc58c251b5f06d0c462511cdce82d0] 2024-11-28T00:28:28,464 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/.tmp/cf/4fb8f84cc3264410ba3490d803f04760 is 209, key is 0a91d1470a9472f6dbbdd48952c64cade/cf:q/1732753708152/Put/seqid=0 2024-11-28T00:28:28,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742047_1223 (size=8172) 2024-11-28T00:28:28,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742047_1223 (size=8172) 2024-11-28T00:28:28,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742047_1223 (size=8172) 2024-11-28T00:28:28,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:28,479 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411289e8d373f4b6c40c89eb3e37206112510_8be72578f6f15976c5529108ee2f7776 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411289e8d373f4b6c40c89eb3e37206112510_8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:28,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/.tmp/cf/9d0dea63cb8f455ab7540c7bf4318874, store: [table=testtb-testExportFileSystemState family=cf region=8be72578f6f15976c5529108ee2f7776] 2024-11-28T00:28:28,481 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=106}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/.tmp/cf/9d0dea63cb8f455ab7540c7bf4318874 is 209, key is 11b12c10e5d18cc1f50e62ce933d9e986/cf:q/1732753708155/Put/seqid=0 2024-11-28T00:28:28,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742048_1224 (size=5918) 2024-11-28T00:28:28,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742048_1224 (size=5918) 2024-11-28T00:28:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742048_1224 (size=5918) 2024-11-28T00:28:28,490 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/.tmp/cf/4fb8f84cc3264410ba3490d803f04760 2024-11-28T00:28:28,496 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/.tmp/cf/4fb8f84cc3264410ba3490d803f04760 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/cf/4fb8f84cc3264410ba3490d803f04760 2024-11-28T00:28:28,509 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/cf/4fb8f84cc3264410ba3490d803f04760, entries=3, sequenceid=6, filesize=5.8 K 2024-11-28T00:28:28,512 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 15dc58c251b5f06d0c462511cdce82d0 in 101ms, sequenceid=6, compaction requested=false 2024-11-28T00:28:28,512 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemState' 2024-11-28T00:28:28,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.HRegion(2603): Flush status journal for 15dc58c251b5f06d0c462511cdce82d0: 2024-11-28T00:28:28,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. for snaptb0-testExportFileSystemState completed. 
2024-11-28T00:28:28,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-28T00:28:28,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:28:28,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/cf/4fb8f84cc3264410ba3490d803f04760] hfiles 2024-11-28T00:28:28,513 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/cf/4fb8f84cc3264410ba3490d803f04760 for snapshot=snaptb0-testExportFileSystemState 2024-11-28T00:28:28,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742049_1225 (size=14999) 2024-11-28T00:28:28,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742049_1225 (size=14999) 2024-11-28T00:28:28,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742049_1225 (size=14999) 2024-11-28T00:28:28,522 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/.tmp/cf/9d0dea63cb8f455ab7540c7bf4318874 2024-11-28T00:28:28,531 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/.tmp/cf/9d0dea63cb8f455ab7540c7bf4318874 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/cf/9d0dea63cb8f455ab7540c7bf4318874 2024-11-28T00:28:28,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-28T00:28:28,540 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/cf/9d0dea63cb8f455ab7540c7bf4318874, entries=47, sequenceid=6, filesize=14.6 K 2024-11-28T00:28:28,541 INFO 
[RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 8be72578f6f15976c5529108ee2f7776 in 130ms, sequenceid=6, compaction requested=false 2024-11-28T00:28:28,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.HRegion(2603): Flush status journal for 8be72578f6f15976c5529108ee2f7776: 2024-11-28T00:28:28,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. for snaptb0-testExportFileSystemState completed. 2024-11-28T00:28:28,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.' region-info for snapshot=snaptb0-testExportFileSystemState 2024-11-28T00:28:28,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:28:28,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/cf/9d0dea63cb8f455ab7540c7bf4318874] hfiles 2024-11-28T00:28:28,541 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/cf/9d0dea63cb8f455ab7540c7bf4318874 for snapshot=snaptb0-testExportFileSystemState 2024-11-28T00:28:28,551 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742050_1226 (size=110) 2024-11-28T00:28:28,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742050_1226 (size=110) 2024-11-28T00:28:28,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742050_1226 (size=110) 2024-11-28T00:28:28,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 
2024-11-28T00:28:28,553 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=105}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=105 2024-11-28T00:28:28,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=105 2024-11-28T00:28:28,554 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:28,554 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=105, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:28,556 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=105, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 15dc58c251b5f06d0c462511cdce82d0 in 299 msec 2024-11-28T00:28:28,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742051_1227 (size=110) 2024-11-28T00:28:28,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742051_1227 (size=110) 2024-11-28T00:28:28,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742051_1227 (size=110) 2024-11-28T00:28:28,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 
2024-11-28T00:28:28,572 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=106}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=106 2024-11-28T00:28:28,573 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=106 2024-11-28T00:28:28,573 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemState on region 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:28,573 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=106, ppid=104, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:28,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=106, resume processing ppid=104 2024-11-28T00:28:28,577 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=106, ppid=104, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8be72578f6f15976c5529108ee2f7776 in 318 msec 2024-11-28T00:28:28,577 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:28:28,578 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:28:28,580 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
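Once the remaining states below (CONSOLIDATE_SNAPSHOT, VERIFIER_SNAPSHOT, COMPLETE_SNAPSHOT) finish, the snapshot directory is moved out of .tmp and becomes visible to clients. A small, hypothetical helper for confirming that from the client side (the snapshot name comes from the log; the helper itself is illustrative):

    import java.io.IOException;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    public final class SnapshotChecks {
      // Returns true if a completed snapshot with the given name is listed by the master.
      static boolean snapshotExists(Admin admin, String name) throws IOException {
        for (SnapshotDescription sd : admin.listSnapshots()) {
          if (name.equals(sd.getName())) {
            return true;
          }
        }
        return false;
      }
    }
    // e.g. snapshotExists(admin, "snaptb0-testExportFileSystemState")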
2024-11-28T00:28:28,580 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:28:28,580 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:28,582 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411289e8d373f4b6c40c89eb3e37206112510_8be72578f6f15976c5529108ee2f7776, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411280af19db63ce740d9b828ad40cd262d4a_15dc58c251b5f06d0c462511cdce82d0] hfiles 2024-11-28T00:28:28,582 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411289e8d373f4b6c40c89eb3e37206112510_8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:28,582 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411280af19db63ce740d9b828ad40cd262d4a_15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:28,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742052_1228 (size=294) 2024-11-28T00:28:28,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742052_1228 (size=294) 2024-11-28T00:28:28,597 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742052_1228 (size=294) 2024-11-28T00:28:28,598 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:28:28,598 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemState 2024-11-28T00:28:28,599 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-28T00:28:28,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742053_1229 (size=963) 2024-11-28T00:28:28,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742053_1229 (size=963) 2024-11-28T00:28:28,627 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742053_1229 (size=963) 2024-11-28T00:28:28,632 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:28:28,651 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:28:28,652 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-28T00:28:28,654 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=104, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:28:28,654 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 104 2024-11-28T00:28:28,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=104, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=104, snapshot={ ss=snaptb0-testExportFileSystemState table=testtb-testExportFileSystemState type=FLUSH ttl=0 } in 447 msec 2024-11-28T00:28:28,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=104 2024-11-28T00:28:28,844 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:28:28,844 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844 2024-11-28T00:28:28,844 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:28,895 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:28,895 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): 
outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-28T00:28:28,897 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:28:28,910 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemState to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemState 2024-11-28T00:28:28,939 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742055_1231 (size=963) 2024-11-28T00:28:28,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742055_1231 (size=963) 2024-11-28T00:28:28,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742055_1231 (size=963) 2024-11-28T00:28:28,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742054_1230 (size=165) 2024-11-28T00:28:28,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742054_1230 (size=165) 2024-11-28T00:28:28,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742054_1230 (size=165) 2024-11-28T00:28:28,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:28,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:28,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:29,148 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000002/launch_container.sh] 2024-11-28T00:28:29,149 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000002/container_tokens] 2024-11-28T00:28:29,149 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000002/sysfs] 2024-11-28T00:28:30,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-10766357054270160257.jar 2024-11-28T00:28:30,291 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:30,292 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:30,380 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-10271581996044897169.jar 2024-11-28T00:28:30,381 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:30,381 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:30,381 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:30,382 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:30,382 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:30,382 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:30,383 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:28:30,383 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:28:30,384 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:28:30,384 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:28:30,384 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:28:30,385 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:28:30,385 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:28:30,385 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:28:30,386 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:28:30,386 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:28:30,386 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:28:30,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:30,387 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:30,388 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:30,388 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:30,388 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:30,389 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:30,389 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:30,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742056_1232 (size=131440) 2024-11-28T00:28:30,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742056_1232 (size=131440) 2024-11-28T00:28:30,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742056_1232 (size=131440) 2024-11-28T00:28:30,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742057_1233 (size=4188619) 2024-11-28T00:28:30,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742057_1233 (size=4188619) 2024-11-28T00:28:30,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742057_1233 (size=4188619) 2024-11-28T00:28:30,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742058_1234 (size=1323991) 
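The TableMapReduceUtil entries above show ExportSnapshot assembling the dependency jars it ships with its MapReduce job before copying the snapshot files to the destination printed at the start of the export. A sketch of how such an export is typically driven, assuming ExportSnapshot's Hadoop Tool interface and its documented -snapshot/-copy-to/-mappers options (the destination path is the one from the log; the mapper count is arbitrary):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Roughly equivalent to: hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
        //   -snapshot snaptb0-testExportFileSystemState -copy-to <dest> -mappers 2
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportFileSystemState",
            "-copy-to", "hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844",
            "-mappers", "2"
        });
        System.exit(rc);
      }
    }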
2024-11-28T00:28:30,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742058_1234 (size=1323991) 2024-11-28T00:28:30,631 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742058_1234 (size=1323991) 2024-11-28T00:28:30,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742059_1235 (size=903927) 2024-11-28T00:28:30,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742059_1235 (size=903927) 2024-11-28T00:28:30,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742059_1235 (size=903927) 2024-11-28T00:28:30,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742060_1236 (size=8360083) 2024-11-28T00:28:30,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742060_1236 (size=8360083) 2024-11-28T00:28:30,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742060_1236 (size=8360083) 2024-11-28T00:28:30,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742061_1237 (size=1877034) 2024-11-28T00:28:30,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742061_1237 (size=1877034) 2024-11-28T00:28:30,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742061_1237 (size=1877034) 2024-11-28T00:28:30,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742062_1238 (size=440957) 2024-11-28T00:28:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742062_1238 (size=440957) 2024-11-28T00:28:30,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742062_1238 (size=440957) 2024-11-28T00:28:30,843 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0003_000001 (auth:SIMPLE) from 127.0.0.1:45038 2024-11-28T00:28:30,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742063_1239 (size=77835) 2024-11-28T00:28:30,846 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742063_1239 (size=77835) 2024-11-28T00:28:30,847 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742063_1239 (size=77835) 2024-11-28T00:28:30,856 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000001/launch_container.sh] 2024-11-28T00:28:30,856 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000001/container_tokens] 2024-11-28T00:28:30,856 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0003/container_1732753513471_0003_01_000001/sysfs] 2024-11-28T00:28:30,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742064_1240 (size=30949) 2024-11-28T00:28:30,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742064_1240 (size=30949) 2024-11-28T00:28:30,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742064_1240 (size=30949) 2024-11-28T00:28:30,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742065_1241 (size=1597213) 2024-11-28T00:28:30,907 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742065_1241 (size=1597213) 2024-11-28T00:28:30,908 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742065_1241 (size=1597213) 2024-11-28T00:28:30,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742066_1242 (size=4695811) 2024-11-28T00:28:30,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742066_1242 (size=4695811) 2024-11-28T00:28:30,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742066_1242 (size=4695811) 2024-11-28T00:28:30,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742067_1243 (size=232957) 2024-11-28T00:28:30,954 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742067_1243 (size=232957) 2024-11-28T00:28:30,955 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742067_1243 (size=232957) 2024-11-28T00:28:30,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742068_1244 (size=127628) 2024-11-28T00:28:30,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45909 is added to blk_1073742068_1244 (size=127628) 2024-11-28T00:28:30,964 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742068_1244 (size=127628) 2024-11-28T00:28:30,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742069_1245 (size=20406) 2024-11-28T00:28:30,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742069_1245 (size=20406) 2024-11-28T00:28:30,998 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742069_1245 (size=20406) 2024-11-28T00:28:31,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742070_1246 (size=5175431) 2024-11-28T00:28:31,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742070_1246 (size=5175431) 2024-11-28T00:28:31,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742070_1246 (size=5175431) 2024-11-28T00:28:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742071_1247 (size=217634) 2024-11-28T00:28:31,062 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742071_1247 (size=217634) 2024-11-28T00:28:31,063 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742071_1247 (size=217634) 2024-11-28T00:28:31,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742072_1248 (size=6424743) 2024-11-28T00:28:31,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742072_1248 (size=6424743) 2024-11-28T00:28:31,101 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742072_1248 (size=6424743) 2024-11-28T00:28:31,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742073_1249 (size=1832290) 2024-11-28T00:28:31,140 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742073_1249 (size=1832290) 2024-11-28T00:28:31,141 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742073_1249 (size=1832290) 2024-11-28T00:28:31,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742074_1250 (size=322274) 2024-11-28T00:28:31,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742074_1250 (size=322274) 2024-11-28T00:28:31,184 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742074_1250 (size=322274) 2024-11-28T00:28:31,205 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742075_1251 (size=503880) 2024-11-28T00:28:31,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742075_1251 (size=503880) 2024-11-28T00:28:31,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742075_1251 (size=503880) 2024-11-28T00:28:31,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742076_1252 (size=29229) 2024-11-28T00:28:31,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742076_1252 (size=29229) 2024-11-28T00:28:31,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742076_1252 (size=29229) 2024-11-28T00:28:31,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742077_1253 (size=24096) 2024-11-28T00:28:31,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742077_1253 (size=24096) 2024-11-28T00:28:31,290 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742077_1253 (size=24096) 2024-11-28T00:28:31,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742078_1254 (size=111872) 2024-11-28T00:28:31,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742078_1254 (size=111872) 2024-11-28T00:28:31,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742078_1254 (size=111872) 2024-11-28T00:28:31,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742079_1255 (size=45609) 2024-11-28T00:28:31,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742079_1255 (size=45609) 2024-11-28T00:28:31,331 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742079_1255 (size=45609) 2024-11-28T00:28:31,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742080_1256 (size=136454) 2024-11-28T00:28:31,350 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742080_1256 (size=136454) 2024-11-28T00:28:31,351 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742080_1256 (size=136454) 2024-11-28T00:28:31,352 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-28T00:28:31,354 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemState' hfile list 2024-11-28T00:28:31,356 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.6 K 2024-11-28T00:28:31,356 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:28:31,356 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-11-28T00:28:31,356 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-28T00:28:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742081_1257 (size=1035) 2024-11-28T00:28:31,365 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742081_1257 (size=1035) 2024-11-28T00:28:31,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742081_1257 (size=1035) 2024-11-28T00:28:31,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742082_1258 (size=35) 2024-11-28T00:28:31,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742082_1258 (size=35) 2024-11-28T00:28:31,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742082_1258 (size=35) 2024-11-28T00:28:31,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742083_1259 (size=304006) 2024-11-28T00:28:31,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742083_1259 (size=304006) 2024-11-28T00:28:31,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742083_1259 (size=304006) 2024-11-28T00:28:31,410 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:28:31,410 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:28:31,493 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:28:31,585 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:48760 2024-11-28T00:28:33,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-28T00:28:35,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-28T00:28:35,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-28T00:28:35,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportWithResetTtl 2024-11-28T00:28:35,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithResetTtl 2024-11-28T00:28:39,565 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:37242 2024-11-28T00:28:39,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742084_1260 (size=349704) 2024-11-28T00:28:39,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742084_1260 (size=349704) 2024-11-28T00:28:39,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742084_1260 (size=349704) 2024-11-28T00:28:41,045 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:28:41,871 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:60754 2024-11-28T00:28:41,874 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:34710 2024-11-28T00:28:42,686 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:33762 2024-11-28T00:28:42,693 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:60486 2024-11-28T00:28:44,829 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0004_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:28:47,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742085_1261 (size=8172) 2024-11-28T00:28:47,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742085_1261 (size=8172) 2024-11-28T00:28:47,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742085_1261 (size=8172) 2024-11-28T00:28:50,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742088_1264 (size=14999) 2024-11-28T00:28:50,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:43159 is added to blk_1073742088_1264 (size=14999) 2024-11-28T00:28:50,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742088_1264 (size=14999) 2024-11-28T00:28:50,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742087_1263 (size=5102) 2024-11-28T00:28:50,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742087_1263 (size=5102) 2024-11-28T00:28:50,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742087_1263 (size=5102) 2024-11-28T00:28:50,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742089_1265 (size=5918) 2024-11-28T00:28:50,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742089_1265 (size=5918) 2024-11-28T00:28:50,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742089_1265 (size=5918) 2024-11-28T00:28:50,705 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742086_1262 (size=31749) 2024-11-28T00:28:50,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742086_1262 (size=31749) 2024-11-28T00:28:50,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742086_1262 (size=31749) 2024-11-28T00:28:50,749 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000002/launch_container.sh] 2024-11-28T00:28:50,749 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000002/container_tokens] 2024-11-28T00:28:50,749 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000002/sysfs] 2024-11-28T00:28:50,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742090_1266 (size=466) 2024-11-28T00:28:50,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742090_1266 (size=466) 2024-11-28T00:28:50,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:45909 is added to blk_1073742090_1266 (size=466) 2024-11-28T00:28:50,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742091_1267 (size=31749) 2024-11-28T00:28:50,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742091_1267 (size=31749) 2024-11-28T00:28:50,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742091_1267 (size=31749) 2024-11-28T00:28:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742092_1268 (size=349704) 2024-11-28T00:28:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742092_1268 (size=349704) 2024-11-28T00:28:50,835 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742092_1268 (size=349704) 2024-11-28T00:28:50,851 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:60498 2024-11-28T00:28:50,863 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:33768 2024-11-28T00:28:52,596 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:28:52,598 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 2024-11-28T00:28:52,605 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemState 2024-11-28T00:28:52,605 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:28:52,605 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:28:52,606 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemState at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-28T00:28:52,606 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-28T00:28:52,606 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-28T00:28:52,606 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844/.hbase-snapshot/snaptb0-testExportFileSystemState at 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844/.hbase-snapshot/snaptb0-testExportFileSystemState 2024-11-28T00:28:52,607 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844/.hbase-snapshot/snaptb0-testExportFileSystemState/.snapshotinfo 2024-11-28T00:28:52,607 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753708844/.hbase-snapshot/snaptb0-testExportFileSystemState/data.manifest 2024-11-28T00:28:52,613 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemState 2024-11-28T00:28:52,614 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=107, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:28:52,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-28T00:28:52,617 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753732617"}]},"ts":"1732753732617"} 2024-11-28T00:28:52,619 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLING in hbase:meta 2024-11-28T00:28:52,619 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemState to state=DISABLING 2024-11-28T00:28:52,619 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=108, ppid=107, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState}] 2024-11-28T00:28:52,621 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, UNASSIGN}, {pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, UNASSIGN}] 2024-11-28T00:28:52,622 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, UNASSIGN 2024-11-28T00:28:52,623 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, UNASSIGN 2024-11-28T00:28:52,624 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=8be72578f6f15976c5529108ee2f7776, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:28:52,624 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta 
row=15dc58c251b5f06d0c462511cdce82d0, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:28:52,626 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=110, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, UNASSIGN because future has completed 2024-11-28T00:28:52,626 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:28:52,626 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8be72578f6f15976c5529108ee2f7776, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:28:52,627 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=109, ppid=108, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, UNASSIGN because future has completed 2024-11-28T00:28:52,628 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:28:52,628 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 15dc58c251b5f06d0c462511cdce82d0, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:28:52,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-28T00:28:52,781 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(122): Close 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:52,781 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:28:52,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1722): Closing 8be72578f6f15976c5529108ee2f7776, disabling compactions & flushes 2024-11-28T00:28:52,782 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:52,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 2024-11-28T00:28:52,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. after waiting 0 ms 2024-11-28T00:28:52,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 
2024-11-28T00:28:52,789 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(122): Close 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:52,789 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:28:52,789 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1722): Closing 15dc58c251b5f06d0c462511cdce82d0, disabling compactions & flushes 2024-11-28T00:28:52,789 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:52,789 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:52,789 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. after waiting 0 ms 2024-11-28T00:28:52,789 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:52,835 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:28:52,836 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:28:52,836 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:28:52,836 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776. 
2024-11-28T00:28:52,837 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] regionserver.HRegion(1676): Region close journal for 8be72578f6f15976c5529108ee2f7776: Waiting for close lock at 1732753732781Running coprocessor pre-close hooks at 1732753732781Disabling compacts and flushes for region at 1732753732781Disabling writes for close at 1732753732782 (+1 ms)Writing region close event to WAL at 1732753732796 (+14 ms)Running coprocessor post-close hooks at 1732753732836 (+40 ms)Closed at 1732753732836 2024-11-28T00:28:52,838 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:28:52,838 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0. 2024-11-28T00:28:52,838 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] regionserver.HRegion(1676): Region close journal for 15dc58c251b5f06d0c462511cdce82d0: Waiting for close lock at 1732753732789Running coprocessor pre-close hooks at 1732753732789Disabling compacts and flushes for region at 1732753732789Disabling writes for close at 1732753732789Writing region close event to WAL at 1732753732800 (+11 ms)Running coprocessor post-close hooks at 1732753732838 (+38 ms)Closed at 1732753732838 2024-11-28T00:28:52,839 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=111}] handler.UnassignRegionHandler(157): Closed 8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:52,840 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=110 updating hbase:meta row=8be72578f6f15976c5529108ee2f7776, regionState=CLOSED 2024-11-28T00:28:52,841 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=112}] handler.UnassignRegionHandler(157): Closed 15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:52,841 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=109 updating hbase:meta row=15dc58c251b5f06d0c462511cdce82d0, regionState=CLOSED 2024-11-28T00:28:52,843 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=111, ppid=110, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8be72578f6f15976c5529108ee2f7776, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:28:52,844 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=112, ppid=109, state=RUNNABLE, hasLock=false; CloseRegionProcedure 15dc58c251b5f06d0c462511cdce82d0, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:28:52,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=111, resume processing ppid=110 2024-11-28T00:28:52,847 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=111, ppid=110, state=SUCCESS, hasLock=false; CloseRegionProcedure 8be72578f6f15976c5529108ee2f7776, server=72c8c7f04009,41621,1732753506207 in 218 msec 2024-11-28T00:28:52,848 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=110, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=8be72578f6f15976c5529108ee2f7776, UNASSIGN in 226 
msec 2024-11-28T00:28:52,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=112, resume processing ppid=109 2024-11-28T00:28:52,848 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=112, ppid=109, state=SUCCESS, hasLock=false; CloseRegionProcedure 15dc58c251b5f06d0c462511cdce82d0, server=72c8c7f04009,43335,1732753506004 in 217 msec 2024-11-28T00:28:52,850 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=109, resume processing ppid=108 2024-11-28T00:28:52,850 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=109, ppid=108, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemState, region=15dc58c251b5f06d0c462511cdce82d0, UNASSIGN in 227 msec 2024-11-28T00:28:52,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=108, resume processing ppid=107 2024-11-28T00:28:52,857 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=108, ppid=107, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemState in 232 msec 2024-11-28T00:28:52,859 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753732859"}]},"ts":"1732753732859"} 2024-11-28T00:28:52,862 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemState, state=DISABLED in hbase:meta 2024-11-28T00:28:52,862 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemState to state=DISABLED 2024-11-28T00:28:52,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=107, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemState in 251 msec 2024-11-28T00:28:52,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=107 2024-11-28T00:28:52,932 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:28:52,932 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemState 2024-11-28T00:28:52,934 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:28:52,935 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=113, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:28:52,935 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemState 2024-11-28T00:28:52,939 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=113, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:28:52,940 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl 
for acl entry testtb-testExportFileSystemState 2024-11-28T00:28:52,943 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:52,943 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:52,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:28:52,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:28:52,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:28:52,944 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:28:52,945 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-28T00:28:52,945 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-28T00:28:52,945 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-28T00:28:52,945 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemState with data PBUF 2024-11-28T00:28:52,946 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/recovered.edits] 2024-11-28T00:28:52,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:28:52,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:52,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, 
quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:28:52,947 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:52,948 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/recovered.edits] 2024-11-28T00:28:52,948 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:28:52,949 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:52,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-11-28T00:28:52,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemState 2024-11-28T00:28:52,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:52,951 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/cf/9d0dea63cb8f455ab7540c7bf4318874 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/cf/9d0dea63cb8f455ab7540c7bf4318874 2024-11-28T00:28:52,953 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/cf/4fb8f84cc3264410ba3490d803f04760 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/cf/4fb8f84cc3264410ba3490d803f04760 2024-11-28T00:28:52,955 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/recovered.edits/9.seqid to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776/recovered.edits/9.seqid 2024-11-28T00:28:52,955 DEBUG [HFileArchiver-13 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:52,958 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0/recovered.edits/9.seqid 2024-11-28T00:28:52,959 DEBUG [HFileArchiver-12 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemState/15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:52,959 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemState regions 2024-11-28T00:28:52,960 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-28T00:28:52,961 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf] 2024-11-28T00:28:52,965 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411289e8d373f4b6c40c89eb3e37206112510_8be72578f6f15976c5529108ee2f7776 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/c4ca4238a0b923820dcc509a6f75849b202411289e8d373f4b6c40c89eb3e37206112510_8be72578f6f15976c5529108ee2f7776 2024-11-28T00:28:52,967 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411280af19db63ce740d9b828ad40cd262d4a_15dc58c251b5f06d0c462511cdce82d0 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d/cf/d41d8cd98f00b204e9800998ecf8427e202411280af19db63ce740d9b828ad40cd262d4a_15dc58c251b5f06d0c462511cdce82d0 2024-11-28T00:28:52,968 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemState/8c2558c0b59bb3a4e2e578c2c8e8915d 2024-11-28T00:28:52,970 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=113, 
state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:28:52,973 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemState from hbase:meta 2024-11-28T00:28:52,976 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemState' descriptor. 2024-11-28T00:28:52,977 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=113, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:28:52,977 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemState' from region states. 2024-11-28T00:28:52,977 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753732977"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:52,977 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753732977"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:52,980 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:28:52,980 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 15dc58c251b5f06d0c462511cdce82d0, NAME => 'testtb-testExportFileSystemState,,1732753706803.15dc58c251b5f06d0c462511cdce82d0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8be72578f6f15976c5529108ee2f7776, NAME => 'testtb-testExportFileSystemState,1,1732753706803.8be72578f6f15976c5529108ee2f7776.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:28:52,980 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemState' as deleted. 
2024-11-28T00:28:52,980 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753732980"}]},"ts":"9223372036854775807"} 2024-11-28T00:28:52,983 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemState state from META 2024-11-28T00:28:52,984 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=113, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemState 2024-11-28T00:28:52,985 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=113, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemState in 51 msec 2024-11-28T00:28:53,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=113 2024-11-28T00:28:53,052 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemState 2024-11-28T00:28:53,052 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemState completed 2024-11-28T00:28:53,062 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemState" type: DISABLED 2024-11-28T00:28:53,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemState 2024-11-28T00:28:53,067 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemState" type: DISABLED 2024-11-28T00:28:53,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemState 2024-11-28T00:28:53,105 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemState Thread=793 (was 788) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46613 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:37297 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LogDeleter #2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 29620) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-3890 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:36218 [Waiting for operation #7] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) 
app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #6 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1836127958_1 at /127.0.0.1:41082 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1836127958_1 at /127.0.0.1:47542 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) 
java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-13 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:41104 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to 
localhost/127.0.0.1:46613 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=805 (was 790) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=990 (was 792) - SystemLoadAverage LEAK? -, ProcessCount=17 (was 17), AvailableMemoryMB=3735 (was 3766) 2024-11-28T00:28:53,105 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-11-28T00:28:53,146 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=793, OpenFileDescriptor=807, MaxFileDescriptor=1048576, SystemLoadAverage=990, ProcessCount=17, AvailableMemoryMB=3729 2024-11-28T00:28:53,146 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=793 is superior to 500 2024-11-28T00:28:53,148 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:28:53,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:28:53,151 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:28:53,152 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testConsecutiveExports" procId is: 114 2024-11-28T00:28:53,152 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:28:53,153 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if 
procedure is done pid=114 2024-11-28T00:28:53,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742093_1269 (size=440) 2024-11-28T00:28:53,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742093_1269 (size=440) 2024-11-28T00:28:53,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742093_1269 (size=440) 2024-11-28T00:28:53,217 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 04f788ca6a411174bb6c65eeb585071a, NAME => 'testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:53,225 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 02c31fc4be530862019024712f0950b0, NAME => 'testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testConsecutiveExports', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:53,263 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-28T00:28:53,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742094_1270 (size=65) 2024-11-28T00:28:53,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742094_1270 (size=65) 2024-11-28T00:28:53,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742094_1270 (size=65) 2024-11-28T00:28:53,275 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:28:53,276 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1722): Closing 04f788ca6a411174bb6c65eeb585071a, disabling 
compactions & flushes 2024-11-28T00:28:53,276 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:53,276 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:53,276 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. after waiting 0 ms 2024-11-28T00:28:53,276 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:53,276 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:53,276 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-0 {}] regionserver.HRegion(1676): Region close journal for 04f788ca6a411174bb6c65eeb585071a: Waiting for close lock at 1732753733276Disabling compacts and flushes for region at 1732753733276Disabling writes for close at 1732753733276Writing region close event to WAL at 1732753733276Closed at 1732753733276 2024-11-28T00:28:53,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742095_1271 (size=65) 2024-11-28T00:28:53,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742095_1271 (size=65) 2024-11-28T00:28:53,293 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742095_1271 (size=65) 2024-11-28T00:28:53,295 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:28:53,295 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1722): Closing 02c31fc4be530862019024712f0950b0, disabling compactions & flushes 2024-11-28T00:28:53,295 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:28:53,295 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:28:53,295 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. after waiting 0 ms 2024-11-28T00:28:53,295 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 
2024-11-28T00:28:53,295 INFO [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:28:53,295 DEBUG [RegionOpenAndInit-testtb-testConsecutiveExports-pool-1 {}] regionserver.HRegion(1676): Region close journal for 02c31fc4be530862019024712f0950b0: Waiting for close lock at 1732753733295Disabling compacts and flushes for region at 1732753733295Disabling writes for close at 1732753733295Writing region close event to WAL at 1732753733295Closed at 1732753733295 2024-11-28T00:28:53,298 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:28:53,298 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732753733298"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753733298"}]},"ts":"1732753733298"} 2024-11-28T00:28:53,298 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732753733298"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753733298"}]},"ts":"1732753733298"} 2024-11-28T00:28:53,302 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:28:53,303 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:28:53,303 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753733303"}]},"ts":"1732753733303"} 2024-11-28T00:28:53,306 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLING in hbase:meta 2024-11-28T00:28:53,306 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:28:53,310 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:28:53,310 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:28:53,310 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:28:53,310 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:28:53,310 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:28:53,310 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:28:53,310 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:28:53,310 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:28:53,310 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:28:53,310 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 
2024-11-28T00:28:53,311 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, ASSIGN}, {pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, ASSIGN}] 2024-11-28T00:28:53,312 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, ASSIGN 2024-11-28T00:28:53,312 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, ASSIGN 2024-11-28T00:28:53,313 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:28:53,314 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:28:53,444 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000003/launch_container.sh] 2024-11-28T00:28:53,444 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000003/container_tokens] 2024-11-28T00:28:53,444 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000003/sysfs] 2024-11-28T00:28:53,464 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:28:53,465 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=04f788ca6a411174bb6c65eeb585071a, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:28:53,465 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=02c31fc4be530862019024712f0950b0, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:28:53,467 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=115, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, ASSIGN because future has completed 2024-11-28T00:28:53,468 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04f788ca6a411174bb6c65eeb585071a, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:28:53,469 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=116, ppid=114, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, ASSIGN because future has completed 2024-11-28T00:28:53,470 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 02c31fc4be530862019024712f0950b0, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:28:53,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-28T00:28:53,629 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:53,629 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7752): Opening region: {ENCODED => 04f788ca6a411174bb6c65eeb585071a, NAME => 'testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:28:53,630 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. service=AccessControlService 2024-11-28T00:28:53,630 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:28:53,630 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,630 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:28:53,630 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7794): checking encryption for 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,631 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(7797): checking classloading for 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,632 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(132): Open testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:28:53,632 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7752): Opening region: {ENCODED => 02c31fc4be530862019024712f0950b0, NAME => 'testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:28:53,633 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. service=AccessControlService 2024-11-28T00:28:53,633 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:28:53,633 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testConsecutiveExports 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,633 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(898): Instantiated testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:28:53,633 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7794): checking encryption for 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,633 INFO [StoreOpener-04f788ca6a411174bb6c65eeb585071a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,633 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(7797): checking classloading for 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,635 INFO [StoreOpener-04f788ca6a411174bb6c65eeb585071a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 04f788ca6a411174bb6c65eeb585071a columnFamilyName cf 2024-11-28T00:28:53,636 DEBUG [StoreOpener-04f788ca6a411174bb6c65eeb585071a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:53,637 INFO [StoreOpener-04f788ca6a411174bb6c65eeb585071a-1 {}] regionserver.HStore(327): Store=04f788ca6a411174bb6c65eeb585071a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:28:53,637 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1038): replaying wal for 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,638 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,639 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,639 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1048): stopping wal replay for 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,639 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1060): Cleaning up temporary data for 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,641 INFO [StoreOpener-02c31fc4be530862019024712f0950b0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,642 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1093): writing seq id for 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,645 INFO [StoreOpener-02c31fc4be530862019024712f0950b0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 02c31fc4be530862019024712f0950b0 columnFamilyName cf 2024-11-28T00:28:53,645 DEBUG [StoreOpener-02c31fc4be530862019024712f0950b0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:53,646 INFO [StoreOpener-02c31fc4be530862019024712f0950b0-1 {}] regionserver.HStore(327): Store=02c31fc4be530862019024712f0950b0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:28:53,646 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1038): replaying wal for 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,648 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,648 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,650 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1048): stopping wal replay for 02c31fc4be530862019024712f0950b0 
2024-11-28T00:28:53,650 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1060): Cleaning up temporary data for 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,653 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1093): writing seq id for 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,661 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:28:53,661 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1114): Opened 02c31fc4be530862019024712f0950b0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71803439, jitterRate=0.06995461881160736}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:28:53,661 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,662 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegion(1006): Region open journal for 02c31fc4be530862019024712f0950b0: Running coprocessor pre-open hook at 1732753733633Writing region info on filesystem at 1732753733633Initializing all the Stores at 1732753733636 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753733636Cleaning up temporary data from old regions at 1732753733650 (+14 ms)Running coprocessor post-open hooks at 1732753733661 (+11 ms)Region opened successfully at 1732753733662 (+1 ms) 2024-11-28T00:28:53,664 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0., pid=118, masterSystemTime=1732753733626 2024-11-28T00:28:53,667 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:28:53,667 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=118}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 
2024-11-28T00:28:53,667 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=116 updating hbase:meta row=02c31fc4be530862019024712f0950b0, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:28:53,668 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:28:53,671 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1114): Opened 04f788ca6a411174bb6c65eeb585071a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=59253659, jitterRate=-0.11705167591571808}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:28:53,671 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,671 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegion(1006): Region open journal for 04f788ca6a411174bb6c65eeb585071a: Running coprocessor pre-open hook at 1732753733631Writing region info on filesystem at 1732753733631Initializing all the Stores at 1732753733633 (+2 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753733633Cleaning up temporary data from old regions at 1732753733639 (+6 ms)Running coprocessor post-open hooks at 1732753733671 (+32 ms)Region opened successfully at 1732753733671 2024-11-28T00:28:53,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=118, ppid=116, state=RUNNABLE, hasLock=false; OpenRegionProcedure 02c31fc4be530862019024712f0950b0, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:28:53,673 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a., pid=117, masterSystemTime=1732753733623 2024-11-28T00:28:53,679 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=115 updating hbase:meta row=04f788ca6a411174bb6c65eeb585071a, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:28:53,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=117, ppid=115, state=RUNNABLE, hasLock=false; OpenRegionProcedure 04f788ca6a411174bb6c65eeb585071a, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:28:53,683 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] regionserver.HRegionServer(2266): Finished post open deploy task for 
testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:53,683 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=117}] handler.AssignRegionHandler(153): Opened testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:53,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=118, resume processing ppid=116 2024-11-28T00:28:53,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=118, ppid=116, state=SUCCESS, hasLock=false; OpenRegionProcedure 02c31fc4be530862019024712f0950b0, server=72c8c7f04009,41621,1732753506207 in 203 msec 2024-11-28T00:28:53,685 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=116, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, ASSIGN in 373 msec 2024-11-28T00:28:53,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=117, resume processing ppid=115 2024-11-28T00:28:53,688 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=117, ppid=115, state=SUCCESS, hasLock=false; OpenRegionProcedure 04f788ca6a411174bb6c65eeb585071a, server=72c8c7f04009,43335,1732753506004 in 217 msec 2024-11-28T00:28:53,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=115, resume processing ppid=114 2024-11-28T00:28:53,693 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=115, ppid=114, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, ASSIGN in 378 msec 2024-11-28T00:28:53,694 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:28:53,694 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753733694"}]},"ts":"1732753733694"} 2024-11-28T00:28:53,697 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=ENABLED in hbase:meta 2024-11-28T00:28:53,699 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=114, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testConsecutiveExports execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:28:53,699 DEBUG [PEWorker-1 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testConsecutiveExports jenkins: RWXCA 2024-11-28T00:28:53,703 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-28T00:28:53,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:53,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:53,705 DEBUG 
[Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:53,706 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:28:53,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:28:53,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:28:53,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:28:53,710 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:28:53,710 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:53,711 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:53,711 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:53,711 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testConsecutiveExports \x00 \x01 \x02 \x03 \x04 2024-11-28T00:28:53,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=114, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testConsecutiveExports in 564 msec 2024-11-28T00:28:53,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=114 2024-11-28T00:28:53,782 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:28:53,782 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for 
max=2147483647 with caching=100 2024-11-28T00:28:53,786 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-28T00:28:53,786 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:53,787 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:28:53,789 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:28:53,797 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:28:53,810 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:28:53,815 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-28T00:28:53,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753733815 (current time:1732753733815). 2024-11-28T00:28:53,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:28:53,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-28T00:28:53,815 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:28:53,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7205887a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:53,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:28:53,816 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:28:53,817 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:28:53,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:28:53,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 
2024-11-28T00:28:53,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28062632, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:53,818 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:28:53,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:28:53,819 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:53,819 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:28:53,820 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ad4405f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:53,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:28:53,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:28:53,822 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:53,823 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37486, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:53,824 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:28:53,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:28:53,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:53,825 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:53,825 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:28:53,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5611d81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:53,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:28:53,826 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:28:53,827 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:28:53,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:28:53,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:28:53,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@65ebefa8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:53,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:28:53,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:28:53,828 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:53,829 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37112, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:28:53,831 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@47cb2f9f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:53,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:28:53,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:28:53,833 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:53,835 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:53,837 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:28:53,838 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:53,839 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53314, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:53,840 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:28:53,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:28:53,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:53,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:53,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-28T00:28:53,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:28:53,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-28T00:28:53,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-28T00:28:53,844 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:28:53,845 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:28:53,846 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:28:53,847 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-28T00:28:53,850 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:28:53,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742096_1272 (size=161) 2024-11-28T00:28:53,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742096_1272 (size=161) 2024-11-28T00:28:53,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742096_1272 (size=161) 2024-11-28T00:28:53,870 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:28:53,871 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04f788ca6a411174bb6c65eeb585071a}, {pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02c31fc4be530862019024712f0950b0}] 2024-11-28T00:28:53,873 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:53,874 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, 
ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:53,951 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-28T00:28:54,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=120 2024-11-28T00:28:54,026 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=121 2024-11-28T00:28:54,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:28:54,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:54,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.HRegion(2603): Flush status journal for 02c31fc4be530862019024712f0950b0: 2024-11-28T00:28:54,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.HRegion(2603): Flush status journal for 04f788ca6a411174bb6c65eeb585071a: 2024-11-28T00:28:54,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. for emptySnaptb0-testConsecutiveExports completed. 2024-11-28T00:28:54,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. for emptySnaptb0-testConsecutiveExports completed. 2024-11-28T00:28:54,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.' region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-28T00:28:54,026 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:28:54,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:28:54,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.' 
region-info for snapshot=emptySnaptb0-testConsecutiveExports 2024-11-28T00:28:54,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:28:54,027 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:28:54,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742097_1273 (size=68) 2024-11-28T00:28:54,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742097_1273 (size=68) 2024-11-28T00:28:54,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742097_1273 (size=68) 2024-11-28T00:28:54,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:28:54,044 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=121}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=121 2024-11-28T00:28:54,045 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=121 2024-11-28T00:28:54,045 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:54,045 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=121, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:54,054 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=121, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 02c31fc4be530862019024712f0950b0 in 182 msec 2024-11-28T00:28:54,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742098_1274 (size=68) 2024-11-28T00:28:54,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742098_1274 (size=68) 2024-11-28T00:28:54,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742098_1274 (size=68) 2024-11-28T00:28:54,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 
2024-11-28T00:28:54,056 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=120}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=120 2024-11-28T00:28:54,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=120 2024-11-28T00:28:54,056 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testConsecutiveExports on region 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:54,057 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=120, ppid=119, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:54,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=120, resume processing ppid=119 2024-11-28T00:28:54,060 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=120, ppid=119, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 04f788ca6a411174bb6c65eeb585071a in 187 msec 2024-11-28T00:28:54,060 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:28:54,061 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:28:54,062 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:28:54,062 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:28:54,062 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:54,063 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:28:54,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742099_1275 (size=60) 2024-11-28T00:28:54,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742099_1275 (size=60) 2024-11-28T00:28:54,070 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742099_1275 (size=60) 2024-11-28T00:28:54,071 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:28:54,071 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testConsecutiveExports 2024-11-28T00:28:54,072 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports 2024-11-28T00:28:54,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742100_1276 (size=641) 2024-11-28T00:28:54,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742100_1276 (size=641) 2024-11-28T00:28:54,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742100_1276 (size=641) 2024-11-28T00:28:54,130 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:28:54,141 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:28:54,141 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testConsecutiveExports to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testConsecutiveExports 2024-11-28T00:28:54,143 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=119, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:28:54,143 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 119 2024-11-28T00:28:54,144 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=119, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=119, snapshot={ ss=emptySnaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 301 msec 2024-11-28T00:28:54,161 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=119 2024-11-28T00:28:54,162 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:28:54,170 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:28:54,175 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41621 {}] regionserver.HRegion(8528): writing data to region testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:28:54,176 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:28:54,180 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testConsecutiveExports 2024-11-28T00:28:54,180 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 
2024-11-28T00:28:54,180 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:28:54,182 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:28:54,188 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:28:54,194 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testConsecutiveExports,, stopping at row=testtb-testConsecutiveExports ,, for max=2147483647 with caching=100 2024-11-28T00:28:54,196 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-28T00:28:54,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753734196 (current time:1732753734196). 2024-11-28T00:28:54,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:28:54,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testConsecutiveExports VERSION not specified, setting to 2 2024-11-28T00:28:54,196 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:28:54,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@318149fc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:54,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:28:54,197 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:28:54,198 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:28:54,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:28:54,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:28:54,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f98ac6a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-28T00:28:54,198 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:28:54,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:28:54,199 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:54,199 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37116, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:28:54,200 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3bf1ac29, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:54,200 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:28:54,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:28:54,201 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:54,202 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:54,203 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:28:54,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:28:54,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:54,203 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:54,203 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:28:54,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@508cc877, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:54,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:28:54,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:28:54,207 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:28:54,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:28:54,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:28:54,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5098b06, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:54,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:28:54,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:28:54,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:54,209 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37136, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:28:54,210 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@43409ec8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:28:54,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:28:54,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:28:54,211 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:54,212 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37518, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:54,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testConsecutiveExports', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:28:54,214 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:28:54,215 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53326, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:28:54,216 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:28:54,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:28:54,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:54,216 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:28:54,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testConsecutiveExports], kv [jenkins: RWXCA] 2024-11-28T00:28:54,217 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:28:54,218 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:28:54,218 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } 2024-11-28T00:28:54,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-28T00:28:54,219 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-28T00:28:54,220 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:28:54,221 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:28:54,223 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:28:54,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742101_1277 (size=156) 2024-11-28T00:28:54,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742101_1277 (size=156) 2024-11-28T00:28:54,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742101_1277 (size=156) 2024-11-28T00:28:54,240 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:28:54,240 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04f788ca6a411174bb6c65eeb585071a}, {pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02c31fc4be530862019024712f0950b0}] 2024-11-28T00:28:54,241 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:54,241 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:54,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-28T00:28:54,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=124 2024-11-28T00:28:54,393 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=123 2024-11-28T00:28:54,393 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:28:54,394 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:28:54,394 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2902): Flushing 04f788ca6a411174bb6c65eeb585071a 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-28T00:28:54,394 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2902): Flushing 02c31fc4be530862019024712f0950b0 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-28T00:28:54,415 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128fe1ef111f7594409b0a3eae5756ece3a_02c31fc4be530862019024712f0950b0 is 71, key is 10088adc439335974fb68ba1b544e46d/cf:q/1732753734175/Put/seqid=0 2024-11-28T00:28:54,425 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cfa1a2a9421d4cbabacff63d24a65dd8_04f788ca6a411174bb6c65eeb585071a is 71, key is 040762d0ca783146443fe1269b45e27a/cf:q/1732753734170/Put/seqid=0 2024-11-28T00:28:54,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742102_1278 (size=8101) 2024-11-28T00:28:54,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742102_1278 (size=8101) 2024-11-28T00:28:54,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742102_1278 (size=8101) 2024-11-28T00:28:54,430 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:54,435 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128fe1ef111f7594409b0a3eae5756ece3a_02c31fc4be530862019024712f0950b0 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128fe1ef111f7594409b0a3eae5756ece3a_02c31fc4be530862019024712f0950b0 2024-11-28T00:28:54,436 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/.tmp/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f, store: [table=testtb-testConsecutiveExports family=cf region=02c31fc4be530862019024712f0950b0] 2024-11-28T00:28:54,437 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/.tmp/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f is 206, key is 138692774f257ca038cf25e7d92806aab/cf:q/1732753734175/Put/seqid=0 2024-11-28T00:28:54,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742103_1279 (size=5171) 2024-11-28T00:28:54,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742103_1279 (size=5171) 2024-11-28T00:28:54,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742103_1279 (size=5171) 2024-11-28T00:28:54,446 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:54,450 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cfa1a2a9421d4cbabacff63d24a65dd8_04f788ca6a411174bb6c65eeb585071a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241128cfa1a2a9421d4cbabacff63d24a65dd8_04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:54,451 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/.tmp/cf/d8cde27ab2d944ee902ea30deb2adba2, store: [table=testtb-testConsecutiveExports family=cf region=04f788ca6a411174bb6c65eeb585071a] 2024-11-28T00:28:54,452 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/.tmp/cf/d8cde27ab2d944ee902ea30deb2adba2 is 206, key is 0ad41677b779e629dea0fef0010953ee0/cf:q/1732753734170/Put/seqid=0 2024-11-28T00:28:54,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742104_1280 (size=14651) 2024-11-28T00:28:54,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742104_1280 (size=14651) 2024-11-28T00:28:54,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742104_1280 (size=14651) 2024-11-28T00:28:54,463 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/.tmp/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f 2024-11-28T00:28:54,469 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/.tmp/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f 2024-11-28T00:28:54,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742105_1281 (size=6106) 2024-11-28T00:28:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742105_1281 (size=6106) 2024-11-28T00:28:54,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742105_1281 (size=6106) 2024-11-28T00:28:54,471 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/.tmp/cf/d8cde27ab2d944ee902ea30deb2adba2 2024-11-28T00:28:54,476 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f, entries=46, sequenceid=6, filesize=14.3 K 2024-11-28T00:28:54,477 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(3140): Finished flush of dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for 02c31fc4be530862019024712f0950b0 in 83ms, sequenceid=6, compaction requested=false 
2024-11-28T00:28:54,477 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testConsecutiveExports' 2024-11-28T00:28:54,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.HRegion(2603): Flush status journal for 02c31fc4be530862019024712f0950b0: 2024-11-28T00:28:54,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. for snaptb0-testConsecutiveExports completed. 2024-11-28T00:28:54,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-28T00:28:54,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:28:54,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f] hfiles 2024-11-28T00:28:54,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f for snapshot=snaptb0-testConsecutiveExports 2024-11-28T00:28:54,478 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/.tmp/cf/d8cde27ab2d944ee902ea30deb2adba2 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/cf/d8cde27ab2d944ee902ea30deb2adba2 2024-11-28T00:28:54,484 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/cf/d8cde27ab2d944ee902ea30deb2adba2, entries=4, sequenceid=6, filesize=6.0 K 2024-11-28T00:28:54,485 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for 04f788ca6a411174bb6c65eeb585071a in 91ms, sequenceid=6, compaction requested=false 2024-11-28T00:28:54,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.HRegion(2603): 
Flush status journal for 04f788ca6a411174bb6c65eeb585071a: 2024-11-28T00:28:54,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. for snaptb0-testConsecutiveExports completed. 2024-11-28T00:28:54,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(241): Storing 'testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.' region-info for snapshot=snaptb0-testConsecutiveExports 2024-11-28T00:28:54,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:28:54,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/cf/d8cde27ab2d944ee902ea30deb2adba2] hfiles 2024-11-28T00:28:54,485 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/cf/d8cde27ab2d944ee902ea30deb2adba2 for snapshot=snaptb0-testConsecutiveExports 2024-11-28T00:28:54,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742106_1282 (size=107) 2024-11-28T00:28:54,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742106_1282 (size=107) 2024-11-28T00:28:54,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742106_1282 (size=107) 2024-11-28T00:28:54,494 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 
2024-11-28T00:28:54,495 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=124}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=124 2024-11-28T00:28:54,495 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=124 2024-11-28T00:28:54,495 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:54,495 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=124, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 02c31fc4be530862019024712f0950b0 2024-11-28T00:28:54,498 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=124, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 02c31fc4be530862019024712f0950b0 in 257 msec 2024-11-28T00:28:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742107_1283 (size=107) 2024-11-28T00:28:54,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742107_1283 (size=107) 2024-11-28T00:28:54,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742107_1283 (size=107) 2024-11-28T00:28:54,501 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 
2024-11-28T00:28:54,502 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=123}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=123 2024-11-28T00:28:54,502 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=123 2024-11-28T00:28:54,502 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testConsecutiveExports on region 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:54,502 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=123, ppid=122, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:54,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=123, resume processing ppid=122 2024-11-28T00:28:54,505 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=123, ppid=122, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 04f788ca6a411174bb6c65eeb585071a in 263 msec 2024-11-28T00:28:54,505 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:28:54,506 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:28:54,507 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:28:54,507 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:28:54,507 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:28:54,508 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128fe1ef111f7594409b0a3eae5756ece3a_02c31fc4be530862019024712f0950b0, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241128cfa1a2a9421d4cbabacff63d24a65dd8_04f788ca6a411174bb6c65eeb585071a] hfiles 2024-11-28T00:28:54,509 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128fe1ef111f7594409b0a3eae5756ece3a_02c31fc4be530862019024712f0950b0 2024-11-28T00:28:54,509 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241128cfa1a2a9421d4cbabacff63d24a65dd8_04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:28:54,522 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742108_1284 (size=291) 2024-11-28T00:28:54,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742108_1284 (size=291) 2024-11-28T00:28:54,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742108_1284 (size=291) 2024-11-28T00:28:54,524 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:28:54,524 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testConsecutiveExports 2024-11-28T00:28:54,525 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:28:54,564 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-28T00:28:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742109_1285 (size=951) 2024-11-28T00:28:54,565 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742109_1285 (size=951) 2024-11-28T00:28:54,565 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742109_1285 (size=951) 2024-11-28T00:28:54,576 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:28:54,583 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:28:54,583 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:28:54,585 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=122, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:28:54,585 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 }, snapshot procedure id = 122 2024-11-28T00:28:54,586 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=122, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=122, snapshot={ ss=snaptb0-testConsecutiveExports table=testtb-testConsecutiveExports type=FLUSH ttl=0 } in 368 msec 2024-11-28T00:28:54,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=122 2024-11-28T00:28:54,871 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:28:54,872 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871 2024-11-28T00:28:54,872 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:54,905 DEBUG [Time-limited test 
{}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:28:54,905 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@7d54afd5, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:28:54,907 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:28:54,919 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:28:54,990 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:54,991 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:54,991 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-28T00:28:55,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports Metrics about Tables on a single HBase RegionServer 2024-11-28T00:28:55,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemState 2024-11-28T00:28:55,883 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000005/launch_container.sh] 2024-11-28T00:28:55,883 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000005/container_tokens] 2024-11-28T00:28:55,883 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000005/sysfs] 2024-11-28T00:28:55,889 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000004/launch_container.sh] 2024-11-28T00:28:55,889 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000004/container_tokens] 2024-11-28T00:28:55,889 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000004/sysfs] 2024-11-28T00:28:56,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-15957769914700066143.jar 2024-11-28T00:28:56,173 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:56,174 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:56,255 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-81034362751457640.jar 2024-11-28T00:28:56,256 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:56,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:56,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:56,257 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:56,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:56,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:28:56,258 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:28:56,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:28:56,259 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:28:56,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:28:56,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:28:56,260 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:28:56,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:28:56,261 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:28:56,261 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:28:56,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:28:56,262 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:28:56,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:56,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:56,263 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:56,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:56,264 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:28:56,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:56,265 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:28:56,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742110_1286 (size=131440) 2024-11-28T00:28:56,356 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742110_1286 (size=131440) 2024-11-28T00:28:56,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742110_1286 (size=131440) 2024-11-28T00:28:56,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742111_1287 (size=4188619) 2024-11-28T00:28:56,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742111_1287 (size=4188619) 2024-11-28T00:28:56,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742111_1287 (size=4188619) 2024-11-28T00:28:56,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742112_1288 (size=1323991) 2024-11-28T00:28:56,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742112_1288 (size=1323991) 2024-11-28T00:28:56,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742112_1288 (size=1323991) 2024-11-28T00:28:56,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742113_1289 (size=903927) 2024-11-28T00:28:56,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742113_1289 (size=903927) 2024-11-28T00:28:56,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742113_1289 (size=903927) 2024-11-28T00:28:56,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742114_1290 (size=8360083) 2024-11-28T00:28:56,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742114_1290 (size=8360083) 2024-11-28T00:28:56,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742114_1290 (size=8360083) 2024-11-28T00:28:56,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742115_1291 (size=1877034) 2024-11-28T00:28:56,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742115_1291 (size=1877034) 2024-11-28T00:28:56,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742115_1291 (size=1877034) 2024-11-28T00:28:56,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742116_1292 (size=77835) 2024-11-28T00:28:56,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742116_1292 (size=77835) 2024-11-28T00:28:56,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742116_1292 (size=77835) 2024-11-28T00:28:56,617 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742117_1293 (size=6424743) 2024-11-28T00:28:56,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742117_1293 (size=6424743) 2024-11-28T00:28:56,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742117_1293 (size=6424743) 2024-11-28T00:28:56,643 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742118_1294 (size=30949) 2024-11-28T00:28:56,644 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742118_1294 (size=30949) 2024-11-28T00:28:56,645 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742118_1294 (size=30949) 2024-11-28T00:28:56,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742119_1295 (size=1597213) 2024-11-28T00:28:56,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742119_1295 (size=1597213) 2024-11-28T00:28:56,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742119_1295 (size=1597213) 2024-11-28T00:28:56,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742120_1296 (size=4695811) 2024-11-28T00:28:56,788 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742120_1296 (size=4695811) 2024-11-28T00:28:56,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742120_1296 (size=4695811) 2024-11-28T00:28:56,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742121_1297 (size=232957) 2024-11-28T00:28:56,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742121_1297 (size=232957) 2024-11-28T00:28:56,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742121_1297 (size=232957) 2024-11-28T00:28:56,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742122_1298 (size=440957) 2024-11-28T00:28:56,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742122_1298 (size=440957) 2024-11-28T00:28:56,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742122_1298 (size=440957) 2024-11-28T00:28:56,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742123_1299 (size=127628) 2024-11-28T00:28:56,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742123_1299 (size=127628) 2024-11-28T00:28:56,892 INFO 
[Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742123_1299 (size=127628) 2024-11-28T00:28:56,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742124_1300 (size=20406) 2024-11-28T00:28:56,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742124_1300 (size=20406) 2024-11-28T00:28:56,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742124_1300 (size=20406) 2024-11-28T00:28:57,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742125_1301 (size=5175431) 2024-11-28T00:28:57,021 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742125_1301 (size=5175431) 2024-11-28T00:28:57,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742125_1301 (size=5175431) 2024-11-28T00:28:57,060 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0004_000001 (auth:SIMPLE) from 127.0.0.1:58830 2024-11-28T00:28:57,089 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000001/launch_container.sh] 2024-11-28T00:28:57,090 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000001/container_tokens] 2024-11-28T00:28:57,090 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0004/container_1732753513471_0004_01_000001/sysfs] 2024-11-28T00:28:57,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742126_1302 (size=217634) 2024-11-28T00:28:57,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742126_1302 (size=217634) 2024-11-28T00:28:57,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742126_1302 (size=217634) 2024-11-28T00:28:57,175 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742127_1303 (size=1832290) 2024-11-28T00:28:57,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742127_1303 (size=1832290) 
2024-11-28T00:28:57,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742127_1303 (size=1832290) 2024-11-28T00:28:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742128_1304 (size=322274) 2024-11-28T00:28:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742128_1304 (size=322274) 2024-11-28T00:28:57,222 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742128_1304 (size=322274) 2024-11-28T00:28:57,266 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742129_1305 (size=503880) 2024-11-28T00:28:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742129_1305 (size=503880) 2024-11-28T00:28:57,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742129_1305 (size=503880) 2024-11-28T00:28:57,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742130_1306 (size=29229) 2024-11-28T00:28:57,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742130_1306 (size=29229) 2024-11-28T00:28:57,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742130_1306 (size=29229) 2024-11-28T00:28:57,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742131_1307 (size=24096) 2024-11-28T00:28:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742131_1307 (size=24096) 2024-11-28T00:28:57,320 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742131_1307 (size=24096) 2024-11-28T00:28:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742132_1308 (size=111872) 2024-11-28T00:28:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742132_1308 (size=111872) 2024-11-28T00:28:57,346 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742132_1308 (size=111872) 2024-11-28T00:28:57,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742133_1309 (size=45609) 2024-11-28T00:28:57,362 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742133_1309 (size=45609) 2024-11-28T00:28:57,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742133_1309 (size=45609) 2024-11-28T00:28:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742134_1310 (size=136454) 
2024-11-28T00:28:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742134_1310 (size=136454) 2024-11-28T00:28:57,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742134_1310 (size=136454) 2024-11-28T00:28:57,379 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:28:57,381 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-28T00:28:57,384 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.3 K 2024-11-28T00:28:57,384 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-11-28T00:28:57,384 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-11-28T00:28:57,384 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-28T00:28:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742135_1311 (size=1023) 2024-11-28T00:28:57,403 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742135_1311 (size=1023) 2024-11-28T00:28:57,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742135_1311 (size=1023) 2024-11-28T00:28:57,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742136_1312 (size=35) 2024-11-28T00:28:57,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742136_1312 (size=35) 2024-11-28T00:28:57,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742136_1312 (size=35) 2024-11-28T00:28:57,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742137_1313 (size=304039) 2024-11-28T00:28:57,463 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742137_1313 (size=304039) 2024-11-28T00:28:57,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742137_1313 (size=304039) 2024-11-28T00:28:57,488 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:28:57,489 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:28:57,784 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:45868 2024-11-28T00:28:58,388 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:29:03,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:29:05,054 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:55986 2024-11-28T00:29:05,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742138_1314 (size=349737) 2024-11-28T00:29:05,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742138_1314 (size=349737) 2024-11-28T00:29:05,670 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742138_1314 (size=349737) 2024-11-28T00:29:07,438 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:42362 2024-11-28T00:29:07,439 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:53236 2024-11-28T00:29:08,235 WARN [regionserver/72c8c7f04009:0.Chore.1 {}] hbase.ExecutorStatusChore(69): RS_COMPACTED_FILES_DISCHARGER's size info, queued: 4, running: 0 2024-11-28T00:29:08,246 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:53240 2024-11-28T00:29:08,247 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:42374 2024-11-28T00:29:11,052 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0005_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:29:11,757 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 02c31fc4be530862019024712f0950b0 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:29:11,757 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 04f788ca6a411174bb6c65eeb585071a changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:29:16,569 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000005/launch_container.sh] 2024-11-28T00:29:16,569 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000005/container_tokens] 2024-11-28T00:29:16,569 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000005/sysfs] 2024-11-28T00:29:16,611 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000004/launch_container.sh] 2024-11-28T00:29:16,611 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000004/container_tokens] 2024-11-28T00:29:16,611 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000004/sysfs] 2024-11-28T00:29:16,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742139_1315 (size=31810) 2024-11-28T00:29:16,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742139_1315 (size=31810) 2024-11-28T00:29:16,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742139_1315 (size=31810) 2024-11-28T00:29:16,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742140_1316 (size=463) 2024-11-28T00:29:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742140_1316 (size=463) 2024-11-28T00:29:16,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742140_1316 (size=463) 2024-11-28T00:29:16,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742141_1317 (size=31810) 2024-11-28T00:29:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742141_1317 (size=31810) 2024-11-28T00:29:16,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742141_1317 (size=31810) 
2024-11-28T00:29:16,754 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000003/launch_container.sh] 2024-11-28T00:29:16,754 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000003/container_tokens] 2024-11-28T00:29:16,754 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000003/sysfs] 2024-11-28T00:29:16,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742142_1318 (size=349737) 2024-11-28T00:29:16,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742142_1318 (size=349737) 2024-11-28T00:29:16,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742142_1318 (size=349737) 2024-11-28T00:29:16,794 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:45792 2024-11-28T00:29:16,803 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:37880 2024-11-28T00:29:16,811 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:45806 2024-11-28T00:29:18,777 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:29:18,777 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
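The "Finalize the Snapshot Export" and "Verify the exported snapshot's expiration status and integrity" messages above (and the "Export Completed: snaptb0-testConsecutiveExports" line that follows) are emitted by the org.apache.hadoop.hbase.snapshot.ExportSnapshot tool that the test drives. For reference only, a minimal standalone invocation of the same tool might look like the sketch below; the "-copy-to" destination is a placeholder, not the directory used in this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Export the snapshot named in the log; the target URI below is a placeholder.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testConsecutiveExports",
        "-copy-to", "file:///tmp/snapshot-export",
        "-overwrite"
    });
    System.exit(rc);
  }
}
```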
2024-11-28T00:29:18,787 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-28T00:29:18,787 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:29:18,788 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:29:18,788 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:29:18,789 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-28T00:29:18,789 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-28T00:29:18,789 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@7d54afd5 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:29:18,790 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-28T00:29:18,790 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-28T00:29:18,792 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:29:18,842 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:29:18,842 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@7d54afd5, 
outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:29:18,845 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:29:18,869 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/.tmp/snaptb0-testConsecutiveExports 2024-11-28T00:29:18,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:18,936 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:18,937 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:19,320 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000002/launch_container.sh] 2024-11-28T00:29:19,320 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000002/container_tokens] 2024-11-28T00:29:19,320 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000002/sysfs] 2024-11-28T00:29:20,139 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-12899663058035865624.jar 2024-11-28T00:29:20,140 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:20,140 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:20,239 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-9015374911537183754.jar 2024-11-28T00:29:20,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:20,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:20,240 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:20,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:20,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:20,241 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:20,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:29:20,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:29:20,242 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:29:20,243 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:29:20,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:29:20,243 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:29:20,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:29:20,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:29:20,244 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:29:20,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:29:20,245 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:29:20,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:29:20,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:29:20,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:29:20,246 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
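The long run of "For class X, using jar Y" DEBUG lines above is produced while the export job's dependency jars are resolved and attached to the MapReduce job. In application code this resolution is normally a single call to TableMapReduceUtil.addDependencyJars; a minimal self-contained sketch follows (the job name is arbitrary and only for illustration).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class AddDependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-copy");
    // Finds the jar containing each required class and adds it to the job's
    // classpath (tmpjars), which is what the DEBUG lines above are reporting.
    TableMapReduceUtil.addDependencyJars(job);
  }
}
```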
2024-11-28T00:29:20,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:29:20,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:29:20,247 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:29:20,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742143_1319 (size=131440) 2024-11-28T00:29:20,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742143_1319 (size=131440) 2024-11-28T00:29:20,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742143_1319 (size=131440) 2024-11-28T00:29:20,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742144_1320 (size=4188619) 2024-11-28T00:29:20,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742144_1320 (size=4188619) 2024-11-28T00:29:20,377 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742144_1320 (size=4188619) 2024-11-28T00:29:20,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742145_1321 (size=1323991) 2024-11-28T00:29:20,398 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742145_1321 (size=1323991) 2024-11-28T00:29:20,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742145_1321 (size=1323991) 2024-11-28T00:29:20,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742146_1322 (size=903927) 2024-11-28T00:29:20,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742146_1322 (size=903927) 2024-11-28T00:29:20,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742146_1322 (size=903927) 2024-11-28T00:29:20,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742147_1323 (size=8360083) 2024-11-28T00:29:20,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742147_1323 (size=8360083) 2024-11-28T00:29:20,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39705 is added to blk_1073742147_1323 (size=8360083) 2024-11-28T00:29:20,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742148_1324 (size=6424743) 2024-11-28T00:29:20,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742148_1324 (size=6424743) 2024-11-28T00:29:20,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742148_1324 (size=6424743) 2024-11-28T00:29:20,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742149_1325 (size=1877034) 2024-11-28T00:29:20,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742149_1325 (size=1877034) 2024-11-28T00:29:20,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742149_1325 (size=1877034) 2024-11-28T00:29:20,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742150_1326 (size=77835) 2024-11-28T00:29:20,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742150_1326 (size=77835) 2024-11-28T00:29:20,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742150_1326 (size=77835) 2024-11-28T00:29:20,653 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742151_1327 (size=30949) 2024-11-28T00:29:20,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742151_1327 (size=30949) 2024-11-28T00:29:20,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742151_1327 (size=30949) 2024-11-28T00:29:20,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742152_1328 (size=1597213) 2024-11-28T00:29:20,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742152_1328 (size=1597213) 2024-11-28T00:29:20,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742152_1328 (size=1597213) 2024-11-28T00:29:20,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742153_1329 (size=440957) 2024-11-28T00:29:20,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742153_1329 (size=440957) 2024-11-28T00:29:20,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742153_1329 (size=440957) 2024-11-28T00:29:20,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742154_1330 (size=4695811) 2024-11-28T00:29:20,765 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742154_1330 (size=4695811) 2024-11-28T00:29:20,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742154_1330 (size=4695811) 2024-11-28T00:29:20,796 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742155_1331 (size=232957) 2024-11-28T00:29:20,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742155_1331 (size=232957) 2024-11-28T00:29:20,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742155_1331 (size=232957) 2024-11-28T00:29:20,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742156_1332 (size=127628) 2024-11-28T00:29:20,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742156_1332 (size=127628) 2024-11-28T00:29:20,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742156_1332 (size=127628) 2024-11-28T00:29:20,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742157_1333 (size=20406) 2024-11-28T00:29:20,849 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742157_1333 (size=20406) 2024-11-28T00:29:20,850 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742157_1333 (size=20406) 2024-11-28T00:29:20,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742158_1334 (size=5175431) 2024-11-28T00:29:20,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742158_1334 (size=5175431) 2024-11-28T00:29:20,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742158_1334 (size=5175431) 2024-11-28T00:29:20,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742159_1335 (size=217634) 2024-11-28T00:29:20,967 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742159_1335 (size=217634) 2024-11-28T00:29:20,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742159_1335 (size=217634) 2024-11-28T00:29:21,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742160_1336 (size=1832290) 2024-11-28T00:29:21,007 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742160_1336 (size=1832290) 2024-11-28T00:29:21,008 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742160_1336 (size=1832290) 2024-11-28T00:29:21,053 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742161_1337 (size=322274) 2024-11-28T00:29:21,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742161_1337 (size=322274) 2024-11-28T00:29:21,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742161_1337 (size=322274) 2024-11-28T00:29:21,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742162_1338 (size=503880) 2024-11-28T00:29:21,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742162_1338 (size=503880) 2024-11-28T00:29:21,076 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742162_1338 (size=503880) 2024-11-28T00:29:21,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742163_1339 (size=29229) 2024-11-28T00:29:21,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742163_1339 (size=29229) 2024-11-28T00:29:21,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742163_1339 (size=29229) 2024-11-28T00:29:21,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742164_1340 (size=24096) 2024-11-28T00:29:21,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742164_1340 (size=24096) 2024-11-28T00:29:21,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742164_1340 (size=24096) 2024-11-28T00:29:21,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742165_1341 (size=111872) 2024-11-28T00:29:21,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742165_1341 (size=111872) 2024-11-28T00:29:21,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742165_1341 (size=111872) 2024-11-28T00:29:21,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742166_1342 (size=45609) 2024-11-28T00:29:21,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742166_1342 (size=45609) 2024-11-28T00:29:21,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742166_1342 (size=45609) 2024-11-28T00:29:21,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742167_1343 (size=136454) 2024-11-28T00:29:21,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742167_1343 (size=136454) 2024-11-28T00:29:21,163 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742167_1343 (size=136454) 2024-11-28T00:29:21,165 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:29:21,168 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testConsecutiveExports' hfile list 2024-11-28T00:29:21,170 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.3 K 2024-11-28T00:29:21,170 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=7.9 K 2024-11-28T00:29:21,170 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=6.0 K 2024-11-28T00:29:21,170 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-28T00:29:21,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742168_1344 (size=1023) 2024-11-28T00:29:21,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742168_1344 (size=1023) 2024-11-28T00:29:21,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742168_1344 (size=1023) 2024-11-28T00:29:21,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742169_1345 (size=35) 2024-11-28T00:29:21,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742169_1345 (size=35) 2024-11-28T00:29:21,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742169_1345 (size=35) 2024-11-28T00:29:21,232 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742170_1346 (size=304043) 2024-11-28T00:29:21,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742170_1346 (size=304043) 2024-11-28T00:29:21,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742170_1346 (size=304043) 2024-11-28T00:29:22,890 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:29:22,890 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:29:22,894 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0005_000001 (auth:SIMPLE) from 127.0.0.1:35736 2024-11-28T00:29:22,906 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000001/launch_container.sh] 2024-11-28T00:29:22,906 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000001/container_tokens] 2024-11-28T00:29:22,906 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0005/container_1732753513471_0005_01_000001/sysfs] 2024-11-28T00:29:23,740 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:55522 2024-11-28T00:29:31,766 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:44588 2024-11-28T00:29:32,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742171_1347 (size=349741) 2024-11-28T00:29:32,132 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742171_1347 (size=349741) 2024-11-28T00:29:32,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742171_1347 (size=349741) 2024-11-28T00:29:33,952 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
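The repeated CapacityScheduler warnings a little earlier in this stretch ("maximum-am-resource-percent is insufficient to start a single application in queue ... skipping enforcement") refer to the yarn.scheduler.capacity.maximum-am-resource-percent property; the mini-cluster simply bypasses the check. As a hedged illustration only, raising that property in a Hadoop Configuration looks like the snippet below; 0.5 is an arbitrary example value, not what this test run uses.

```java
import org.apache.hadoop.conf.Configuration;

public class AmResourcePercentSketch {
  public static void main(String[] args) {
    Configuration yarnConf = new Configuration();
    // Fraction of queue capacity that may be used by ApplicationMasters.
    yarnConf.setFloat("yarn.scheduler.capacity.maximum-am-resource-percent", 0.5f);
    System.out.println(yarnConf.get("yarn.scheduler.capacity.maximum-am-resource-percent"));
  }
}
```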
2024-11-28T00:29:34,057 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:55888 2024-11-28T00:29:34,058 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:55004 2024-11-28T00:29:34,870 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:55012 2024-11-28T00:29:34,878 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:55904 2024-11-28T00:29:36,896 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0006_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:29:38,631 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 04f788ca6a411174bb6c65eeb585071a, had cached 0 bytes from a total of 6106 2024-11-28T00:29:38,633 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 02c31fc4be530862019024712f0950b0, had cached 0 bytes from a total of 14651 2024-11-28T00:29:40,698 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000003/launch_container.sh] 2024-11-28T00:29:40,698 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000003/container_tokens] 2024-11-28T00:29:40,698 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000003/sysfs] 2024-11-28T00:29:42,749 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000005/launch_container.sh] 2024-11-28T00:29:42,749 WARN [ContainersLauncher #1 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000005/container_tokens] 2024-11-28T00:29:42,749 WARN [ContainersLauncher #1 {}] 
nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000005/sysfs] 2024-11-28T00:29:43,005 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000004/launch_container.sh] 2024-11-28T00:29:43,005 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000004/container_tokens] 2024-11-28T00:29:43,005 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000004/sysfs] 2024-11-28T00:29:43,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742172_1348 (size=29750) 2024-11-28T00:29:43,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742172_1348 (size=29750) 2024-11-28T00:29:43,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742172_1348 (size=29750) 2024-11-28T00:29:43,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742173_1349 (size=463) 2024-11-28T00:29:43,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742173_1349 (size=463) 2024-11-28T00:29:43,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742173_1349 (size=463) 2024-11-28T00:29:43,109 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000002/launch_container.sh] 2024-11-28T00:29:43,110 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000002/container_tokens] 2024-11-28T00:29:43,110 WARN 
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000002/sysfs] 2024-11-28T00:29:43,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742174_1350 (size=29750) 2024-11-28T00:29:43,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742174_1350 (size=29750) 2024-11-28T00:29:43,129 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742174_1350 (size=29750) 2024-11-28T00:29:43,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742175_1351 (size=349741) 2024-11-28T00:29:43,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742175_1351 (size=349741) 2024-11-28T00:29:43,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742175_1351 (size=349741) 2024-11-28T00:29:43,183 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:33618 2024-11-28T00:29:43,190 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:33626 2024-11-28T00:29:44,481 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:29:44,481 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
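After each export the test walks both the HDFS source and the local destination ("List files in ..." in TestExportSnapshot) and logs the .snapshotinfo and data.manifest entries it finds. The sketch below shows the same kind of flat listing with the standard Hadoop FileSystem API; the path is a placeholder for an exported snapshot directory, not the one in this run.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListExportedSnapshotSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder for the exported snapshot directory seen in the log.
    Path root = new Path("file:///tmp/snapshot-export/.hbase-snapshot/snaptb0-testConsecutiveExports");
    FileSystem fs = root.getFileSystem(new Configuration());
    for (FileStatus status : fs.listStatus(root)) {
      System.out.println(status.getPath()); // e.g. .snapshotinfo, data.manifest
    }
  }
}
```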
2024-11-28T00:29:44,485 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testConsecutiveExports 2024-11-28T00:29:44,485 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:29:44,485 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:29:44,486 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:29:44,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-28T00:29:44,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-28T00:29:44,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in org.apache.hadoop.fs.LocalFileSystem@7d54afd5 in root file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/snaptb0-testConsecutiveExports at file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/snaptb0-testConsecutiveExports 2024-11-28T00:29:44,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/snaptb0-testConsecutiveExports/data.manifest 2024-11-28T00:29:44,487 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753734871/.hbase-snapshot/snaptb0-testConsecutiveExports/.snapshotinfo 2024-11-28T00:29:44,503 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testConsecutiveExports 2024-11-28T00:29:44,504 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=125, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:29:44,506 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-28T00:29:44,506 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753784506"}]},"ts":"1732753784506"} 2024-11-28T00:29:44,508 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLING in hbase:meta 2024-11-28T00:29:44,508 INFO [PEWorker-4 {}] 
procedure.DisableTableProcedure(284): Set testtb-testConsecutiveExports to state=DISABLING 2024-11-28T00:29:44,509 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=126, ppid=125, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports}] 2024-11-28T00:29:44,511 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, UNASSIGN}, {pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, UNASSIGN}] 2024-11-28T00:29:44,512 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, UNASSIGN 2024-11-28T00:29:44,512 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, UNASSIGN 2024-11-28T00:29:44,513 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=04f788ca6a411174bb6c65eeb585071a, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:29:44,513 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=02c31fc4be530862019024712f0950b0, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:29:44,514 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=127, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, UNASSIGN because future has completed 2024-11-28T00:29:44,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=128, ppid=126, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, UNASSIGN because future has completed 2024-11-28T00:29:44,516 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:29:44,516 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:29:44,516 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 02c31fc4be530862019024712f0950b0, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:29:44,516 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 04f788ca6a411174bb6c65eeb585071a, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:29:44,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-28T00:29:44,669 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(122): Close 02c31fc4be530862019024712f0950b0 2024-11-28T00:29:44,669 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(122): Close 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1722): Closing 04f788ca6a411174bb6c65eeb585071a, disabling compactions & flushes 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1722): Closing 02c31fc4be530862019024712f0950b0, disabling compactions & flushes 2024-11-28T00:29:44,669 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:29:44,669 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1755): Closing region testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. after waiting 0 ms 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1843): Acquired close lock on testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. after waiting 0 ms 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:29:44,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1853): Updates disabled for region testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 
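The DisableTableProcedure (pid=125) running here, and the DeleteTableProcedure (pid=131) further down, are the master-side effect of the client disabling and then dropping testtb-testConsecutiveExports. In the HBase Admin API that cleanup is typically written as in the sketch below; this is an illustration of the client call, not the code the test itself runs.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTestTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testConsecutiveExports");
      // A table must be disabled before it can be deleted; these two calls map
      // onto the DISABLE and DELETE procedures visible in the master log.
      if (admin.isTableEnabled(table)) {
        admin.disableTable(table);
      }
      admin.deleteTable(table);
    }
  }
}
```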
2024-11-28T00:29:44,675 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:29:44,675 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:29:44,679 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:29:44,679 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:29:44,679 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a. 2024-11-28T00:29:44,679 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1973): Closed testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0. 2024-11-28T00:29:44,679 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] regionserver.HRegion(1676): Region close journal for 04f788ca6a411174bb6c65eeb585071a: Waiting for close lock at 1732753784669Running coprocessor pre-close hooks at 1732753784669Disabling compacts and flushes for region at 1732753784669Disabling writes for close at 1732753784669Writing region close event to WAL at 1732753784670 (+1 ms)Running coprocessor post-close hooks at 1732753784679 (+9 ms)Closed at 1732753784679 2024-11-28T00:29:44,679 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] regionserver.HRegion(1676): Region close journal for 02c31fc4be530862019024712f0950b0: Waiting for close lock at 1732753784669Running coprocessor pre-close hooks at 1732753784669Disabling compacts and flushes for region at 1732753784669Disabling writes for close at 1732753784669Writing region close event to WAL at 1732753784670 (+1 ms)Running coprocessor post-close hooks at 1732753784679 (+9 ms)Closed at 1732753784679 2024-11-28T00:29:44,682 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=129}] handler.UnassignRegionHandler(157): Closed 02c31fc4be530862019024712f0950b0 2024-11-28T00:29:44,682 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=128 updating hbase:meta row=02c31fc4be530862019024712f0950b0, regionState=CLOSED 2024-11-28T00:29:44,682 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=130}] handler.UnassignRegionHandler(157): Closed 04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:29:44,685 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=127 updating hbase:meta row=04f788ca6a411174bb6c65eeb585071a, regionState=CLOSED 2024-11-28T00:29:44,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): 
Going to wake up procedure pid=129, ppid=128, state=RUNNABLE, hasLock=false; CloseRegionProcedure 02c31fc4be530862019024712f0950b0, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:29:44,687 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=130, ppid=127, state=RUNNABLE, hasLock=false; CloseRegionProcedure 04f788ca6a411174bb6c65eeb585071a, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:29:44,690 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=130, resume processing ppid=127 2024-11-28T00:29:44,691 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=130, ppid=127, state=SUCCESS, hasLock=false; CloseRegionProcedure 04f788ca6a411174bb6c65eeb585071a, server=72c8c7f04009,43335,1732753506004 in 172 msec 2024-11-28T00:29:44,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=129, resume processing ppid=128 2024-11-28T00:29:44,691 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=129, ppid=128, state=SUCCESS, hasLock=false; CloseRegionProcedure 02c31fc4be530862019024712f0950b0, server=72c8c7f04009,41621,1732753506207 in 172 msec 2024-11-28T00:29:44,692 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=127, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=04f788ca6a411174bb6c65eeb585071a, UNASSIGN in 180 msec 2024-11-28T00:29:44,693 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=128, resume processing ppid=126 2024-11-28T00:29:44,694 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=128, ppid=126, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testConsecutiveExports, region=02c31fc4be530862019024712f0950b0, UNASSIGN in 180 msec 2024-11-28T00:29:44,696 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=126, resume processing ppid=125 2024-11-28T00:29:44,696 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=126, ppid=125, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testConsecutiveExports in 185 msec 2024-11-28T00:29:44,697 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753784697"}]},"ts":"1732753784697"} 2024-11-28T00:29:44,699 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testConsecutiveExports, state=DISABLED in hbase:meta 2024-11-28T00:29:44,699 INFO [PEWorker-3 {}] procedure.DisableTableProcedure(296): Set testtb-testConsecutiveExports to state=DISABLED 2024-11-28T00:29:44,701 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=125, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testConsecutiveExports in 196 msec 2024-11-28T00:29:44,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=125 2024-11-28T00:29:44,821 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:29:44,822 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$5(2570): 
Client=jenkins//172.17.0.2 delete testtb-testConsecutiveExports 2024-11-28T00:29:44,823 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:29:44,824 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=131, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:29:44,824 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testConsecutiveExports 2024-11-28T00:29:44,825 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=131, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:29:44,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testConsecutiveExports 2024-11-28T00:29:44,830 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:29:44,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:29:44,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:29:44,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:29:44,832 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:29:44,833 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/recovered.edits] 2024-11-28T00:29:44,833 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-28T00:29:44,833 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0 2024-11-28T00:29:44,834 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-28T00:29:44,835 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-28T00:29:44,835 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testConsecutiveExports with data PBUF 2024-11-28T00:29:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:29:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:29:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:29:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testConsecutiveExports 2024-11-28T00:29:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:44,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:44,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-28T00:29:44,837 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/recovered.edits] 2024-11-28T00:29:44,840 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/cf/d8cde27ab2d944ee902ea30deb2adba2 to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/cf/d8cde27ab2d944ee902ea30deb2adba2 2024-11-28T00:29:44,841 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/cf/2de5ab5c7a7743bcab2dcd8ff818cb0f 2024-11-28T00:29:44,845 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0/recovered.edits/9.seqid 2024-11-28T00:29:44,846 DEBUG [HFileArchiver-15 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/02c31fc4be530862019024712f0950b0 2024-11-28T00:29:44,848 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a/recovered.edits/9.seqid 2024-11-28T00:29:44,848 DEBUG [HFileArchiver-14 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testConsecutiveExports/04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:29:44,848 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(313): Archived testtb-testConsecutiveExports regions 2024-11-28T00:29:44,849 DEBUG [PEWorker-5 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-28T00:29:44,850 DEBUG [PEWorker-5 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf] 2024-11-28T00:29:44,854 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128fe1ef111f7594409b0a3eae5756ece3a_02c31fc4be530862019024712f0950b0 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/c4ca4238a0b923820dcc509a6f75849b20241128fe1ef111f7594409b0a3eae5756ece3a_02c31fc4be530862019024712f0950b0 2024-11-28T00:29:44,855 DEBUG [PEWorker-5 {}] backup.HFileArchiver(596): Archived 
from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241128cfa1a2a9421d4cbabacff63d24a65dd8_04f788ca6a411174bb6c65eeb585071a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac/cf/d41d8cd98f00b204e9800998ecf8427e20241128cfa1a2a9421d4cbabacff63d24a65dd8_04f788ca6a411174bb6c65eeb585071a 2024-11-28T00:29:44,856 DEBUG [PEWorker-5 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testConsecutiveExports/c7b48a2e62736c517f8bfa7d64fc37ac 2024-11-28T00:29:44,859 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=131, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:29:44,862 WARN [PEWorker-5 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testConsecutiveExports from hbase:meta 2024-11-28T00:29:44,865 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testConsecutiveExports' descriptor. 2024-11-28T00:29:44,866 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=131, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:29:44,866 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testConsecutiveExports' from region states. 2024-11-28T00:29:44,867 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753784866"}]},"ts":"9223372036854775807"} 2024-11-28T00:29:44,867 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753784866"}]},"ts":"9223372036854775807"} 2024-11-28T00:29:44,869 INFO [PEWorker-5 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:29:44,869 DEBUG [PEWorker-5 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 04f788ca6a411174bb6c65eeb585071a, NAME => 'testtb-testConsecutiveExports,,1732753733148.04f788ca6a411174bb6c65eeb585071a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 02c31fc4be530862019024712f0950b0, NAME => 'testtb-testConsecutiveExports,1,1732753733148.02c31fc4be530862019024712f0950b0.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:29:44,869 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testConsecutiveExports' as deleted. 
2024-11-28T00:29:44,869 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testConsecutiveExports","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753784869"}]},"ts":"9223372036854775807"} 2024-11-28T00:29:44,871 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testConsecutiveExports state from META 2024-11-28T00:29:44,872 DEBUG [PEWorker-5 {}] procedure.DeleteTableProcedure(133): Finished pid=131, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testConsecutiveExports 2024-11-28T00:29:44,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=131, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testConsecutiveExports in 50 msec 2024-11-28T00:29:44,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=131 2024-11-28T00:29:44,942 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testConsecutiveExports 2024-11-28T00:29:44,942 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testConsecutiveExports completed 2024-11-28T00:29:44,950 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testConsecutiveExports" type: DISABLED 2024-11-28T00:29:44,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testConsecutiveExports 2024-11-28T00:29:44,954 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testConsecutiveExports" type: DISABLED 2024-11-28T00:29:44,956 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testConsecutiveExports 2024-11-28T00:29:44,982 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testConsecutiveExports Thread=800 (was 793) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:36294 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_1718617127_1 at /127.0.0.1:55854 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:39481 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #10 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #9 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 3917) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:59696 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LogDeleter #3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1177) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:39481 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-15 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #7 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-5373 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ApplicationMasterLauncher #8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:55878 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=782 (was 807), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1064 (was 990) - SystemLoadAverage LEAK? -, ProcessCount=15 (was 17), AvailableMemoryMB=3281 (was 3729) 2024-11-28T00:29:44,982 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-11-28T00:29:45,006 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=800, OpenFileDescriptor=782, MaxFileDescriptor=1048576, SystemLoadAverage=1064, ProcessCount=15, AvailableMemoryMB=3277 2024-11-28T00:29:45,006 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=800 is superior to 500 2024-11-28T00:29:45,008 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:29:45,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:45,010 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:29:45,010 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion" procId is: 132 2024-11-28T00:29:45,011 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:29:45,011 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:29:45,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742176_1352 (size=458) 2024-11-28T00:29:45,029 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742176_1352 (size=458) 
2024-11-28T00:29:45,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742176_1352 (size=458) 2024-11-28T00:29:45,033 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => daf7088603a9f84f2be699938b8bcdb0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:29:45,034 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => a29845d15726dd0a5b11d9ae28fd4c0c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:29:45,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742178_1354 (size=83) 2024-11-28T00:29:45,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742178_1354 (size=83) 2024-11-28T00:29:45,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742178_1354 (size=83) 2024-11-28T00:29:45,049 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:45,049 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1722): Closing a29845d15726dd0a5b11d9ae28fd4c0c, disabling compactions & flushes 2024-11-28T00:29:45,049 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 
2024-11-28T00:29:45,049 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:29:45,049 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. after waiting 0 ms 2024-11-28T00:29:45,049 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:29:45,049 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:29:45,050 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-1 {}] regionserver.HRegion(1676): Region close journal for a29845d15726dd0a5b11d9ae28fd4c0c: Waiting for close lock at 1732753785049Disabling compacts and flushes for region at 1732753785049Disabling writes for close at 1732753785049Writing region close event to WAL at 1732753785049Closed at 1732753785049 2024-11-28T00:29:45,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742177_1353 (size=83) 2024-11-28T00:29:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742177_1353 (size=83) 2024-11-28T00:29:45,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742177_1353 (size=83) 2024-11-28T00:29:45,059 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:45,059 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1722): Closing daf7088603a9f84f2be699938b8bcdb0, disabling compactions & flushes 2024-11-28T00:29:45,059 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:45,059 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:45,059 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 
after waiting 0 ms 2024-11-28T00:29:45,059 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:45,059 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:45,059 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-pool-0 {}] regionserver.HRegion(1676): Region close journal for daf7088603a9f84f2be699938b8bcdb0: Waiting for close lock at 1732753785059Disabling compacts and flushes for region at 1732753785059Disabling writes for close at 1732753785059Writing region close event to WAL at 1732753785059Closed at 1732753785059 2024-11-28T00:29:45,060 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:29:45,061 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732753785060"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753785060"}]},"ts":"1732753785060"} 2024-11-28T00:29:45,061 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.","families":{"info":[{"qualifier":"regioninfo","vlen":82,"tag":[],"timestamp":"1732753785060"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753785060"}]},"ts":"1732753785060"} 2024-11-28T00:29:45,063 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-28T00:29:45,065 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:29:45,065 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753785065"}]},"ts":"1732753785065"} 2024-11-28T00:29:45,066 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLING in hbase:meta 2024-11-28T00:29:45,067 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:29:45,068 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:29:45,068 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:29:45,068 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:29:45,068 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:29:45,068 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:29:45,068 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:29:45,068 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:29:45,068 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:29:45,068 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:29:45,068 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:29:45,068 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, ASSIGN}, {pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, ASSIGN}] 2024-11-28T00:29:45,069 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, ASSIGN 2024-11-28T00:29:45,069 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, ASSIGN 2024-11-28T00:29:45,070 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 
2024-11-28T00:29:45,070 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:29:45,121 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:29:45,221 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-28T00:29:45,221 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=a29845d15726dd0a5b11d9ae28fd4c0c, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:29:45,221 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=daf7088603a9f84f2be699938b8bcdb0, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:29:45,224 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=134, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, ASSIGN because future has completed 2024-11-28T00:29:45,224 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:29:45,225 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=133, ppid=132, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, ASSIGN because future has completed 2024-11-28T00:29:45,225 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure daf7088603a9f84f2be699938b8bcdb0, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:29:45,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:29:45,380 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:29:45,380 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7752): Opening region: {ENCODED => a29845d15726dd0a5b11d9ae28fd4c0c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:29:45,380 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 
service=AccessControlService 2024-11-28T00:29:45,380 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:29:45,381 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,381 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:45,381 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7794): checking encryption for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,381 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(7797): checking classloading for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,382 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:45,382 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7752): Opening region: {ENCODED => daf7088603a9f84f2be699938b8bcdb0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:29:45,382 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. service=AccessControlService 2024-11-28T00:29:45,382 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:29:45,382 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,382 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:45,383 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7794): checking encryption for daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,383 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(7797): checking classloading for daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,384 INFO [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,385 INFO [StoreOpener-daf7088603a9f84f2be699938b8bcdb0-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,386 INFO [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a29845d15726dd0a5b11d9ae28fd4c0c columnFamilyName cf 2024-11-28T00:29:45,387 INFO [StoreOpener-daf7088603a9f84f2be699938b8bcdb0-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region daf7088603a9f84f2be699938b8bcdb0 columnFamilyName cf 2024-11-28T00:29:45,387 DEBUG [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:45,387 DEBUG [StoreOpener-daf7088603a9f84f2be699938b8bcdb0-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:45,387 INFO [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] regionserver.HStore(327): Store=a29845d15726dd0a5b11d9ae28fd4c0c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:29:45,387 INFO [StoreOpener-daf7088603a9f84f2be699938b8bcdb0-1 {}] regionserver.HStore(327): Store=daf7088603a9f84f2be699938b8bcdb0/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:29:45,388 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1038): replaying wal for daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,388 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1038): replaying wal for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,389 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,389 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,389 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,389 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1048): stopping wal replay for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,389 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1060): Cleaning up temporary data for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,391 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1093): writing seq id for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,392 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,392 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1048): stopping wal replay for daf7088603a9f84f2be699938b8bcdb0 
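
The CompactionConfiguration entries above report the effective compaction settings for column family cf: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, throttle point 2684354560, and weekly major compactions with 0.5 jitter. These correspond to standard HBase configuration properties; the following is a hedged sketch of setting them programmatically, using the default values echoed in the log, with the method name as an illustrative assumption.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    static Configuration compactionTunedConf() {
      Configuration conf = HBaseConfiguration.create();
      conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
      conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
      conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
      conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
      conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
      conf.setLong("hbase.hregion.majorcompaction", 604800000L);     // major period, 7 days
      conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
      return conf;
    }
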
2024-11-28T00:29:45,392 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1060): Cleaning up temporary data for daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,393 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:29:45,394 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1114): Opened a29845d15726dd0a5b11d9ae28fd4c0c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=65775471, jitterRate=-0.019869104027748108}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:29:45,394 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,395 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegion(1006): Region open journal for a29845d15726dd0a5b11d9ae28fd4c0c: Running coprocessor pre-open hook at 1732753785381Writing region info on filesystem at 1732753785381Initializing all the Stores at 1732753785384 (+3 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753785384Cleaning up temporary data from old regions at 1732753785389 (+5 ms)Running coprocessor post-open hooks at 1732753785394 (+5 ms)Region opened successfully at 1732753785395 (+1 ms) 2024-11-28T00:29:45,395 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1093): writing seq id for daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,396 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c., pid=135, masterSystemTime=1732753785377 2024-11-28T00:29:45,398 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:29:45,398 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 
2024-11-28T00:29:45,398 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=135}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:29:45,398 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1114): Opened daf7088603a9f84f2be699938b8bcdb0; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60109803, jitterRate=-0.10429413616657257}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:29:45,398 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1122): Running coprocessor post-open hooks for daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,398 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegion(1006): Region open journal for daf7088603a9f84f2be699938b8bcdb0: Running coprocessor pre-open hook at 1732753785383Writing region info on filesystem at 1732753785383Initializing all the Stores at 1732753785384 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753785384Cleaning up temporary data from old regions at 1732753785392 (+8 ms)Running coprocessor post-open hooks at 1732753785398 (+6 ms)Region opened successfully at 1732753785398 2024-11-28T00:29:45,399 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=134 updating hbase:meta row=a29845d15726dd0a5b11d9ae28fd4c0c, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:29:45,399 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0., pid=136, masterSystemTime=1732753785379 2024-11-28T00:29:45,401 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=135, ppid=134, state=RUNNABLE, hasLock=false; OpenRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:29:45,401 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:45,401 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=136}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 
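
Both regions are opened with a SteppingSplitPolicy wrapping IncreasingToUpperBoundRegionSplitPolicy and ConstantSizeRegionSplitPolicy, as the "Opened ..." entries above show. The split policy can also be pinned per table instead of relying on the cluster-wide hbase.regionserver.region.split.policy default; a small sketch, with only the policy class name taken from the log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    // Illustrative descriptor that requests SteppingSplitPolicy explicitly.
    static TableDescriptor withSteppingSplitPolicy() {
      return TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
          .setRegionSplitPolicyClassName(
              "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
          .build();
    }
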
2024-11-28T00:29:45,401 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=133 updating hbase:meta row=daf7088603a9f84f2be699938b8bcdb0, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:29:45,403 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=136, ppid=133, state=RUNNABLE, hasLock=false; OpenRegionProcedure daf7088603a9f84f2be699938b8bcdb0, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:29:45,403 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=135, resume processing ppid=134 2024-11-28T00:29:45,403 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=135, ppid=134, state=SUCCESS, hasLock=false; OpenRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,41621,1732753506207 in 177 msec 2024-11-28T00:29:45,405 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=134, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, ASSIGN in 335 msec 2024-11-28T00:29:45,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=136, resume processing ppid=133 2024-11-28T00:29:45,406 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=136, ppid=133, state=SUCCESS, hasLock=false; OpenRegionProcedure daf7088603a9f84f2be699938b8bcdb0, server=72c8c7f04009,36427,1732753506332 in 179 msec 2024-11-28T00:29:45,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=133, resume processing ppid=132 2024-11-28T00:29:45,407 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=133, ppid=132, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, ASSIGN in 338 msec 2024-11-28T00:29:45,408 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:29:45,408 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753785408"}]},"ts":"1732753785408"} 2024-11-28T00:29:45,410 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=ENABLED in hbase:meta 2024-11-28T00:29:45,416 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=132, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:29:45,416 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion jenkins: RWXCA 2024-11-28T00:29:45,419 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-28T00:29:45,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received 
ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:45,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:45,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:45,421 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:45,423 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:45,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:45,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:45,424 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:45,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=132, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 416 msec 2024-11-28T00:29:45,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:45,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion Metrics about Tables on a single HBase RegionServer 2024-11-28T00:29:45,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testConsecutiveExports 2024-11-28T00:29:45,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=132 2024-11-28T00:29:45,641 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:29:45,641 DEBUG [Time-limited test {}] 
hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:29:45,644 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:45,644 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:45,644 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:29:45,646 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:29:45,651 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:29:45,656 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:29:45,659 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-28T00:29:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753785659 (current time:1732753785659). 
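
The master entry above records the client request for a FLUSH-type snapshot { ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion ... type=FLUSH ttl=0 }. An equivalent request through the public Admin API might look like the sketch below; "conn" is an assumed, already-open Connection.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.SnapshotType;

    static void takeEmptySnapshot(Connection conn) throws Exception {
      try (Admin admin = conn.getAdmin()) {
        // FLUSH snapshots flush each region's memstore before capturing store
        // file references; here the regions are still empty, so no hfiles result.
        admin.snapshot("emptySnaptb0-testExportFileSystemStateWithMergeRegion",
            TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
            SnapshotType.FLUSH);
      }
    }
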
2024-11-28T00:29:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:29:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-28T00:29:45,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:29:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7d57fc3f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:29:45,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:29:45,661 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:29:45,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:29:45,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:29:45,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6b6966ff, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:45,661 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:29:45,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:29:45,662 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:45,663 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48478, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:29:45,663 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@45741e3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:45,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:29:45,664 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:29:45,665 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:45,666 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55634, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:29:45,667 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:29:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:29:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:45,667 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:45,667 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
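
The call stack above comes from SnapshotDescriptionUtils.isSecurityAvailable: because the AccessController coprocessor is active, the master opens a short-lived connection to check the ACL table and will copy the table permission written at creation time (jenkins: RWXCA, propagated via /hbase/acl a few entries earlier) into the snapshot description. Granting such a permission from a client normally goes through AccessControlClient; a hedged sketch, with "conn" and the table-scope grant as assumptions:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    static void grantAllToJenkins(Connection conn) throws Throwable {
      // RWXCA = READ, WRITE, EXEC, CREATE, ADMIN, matching the "jenkins: RWXCA"
      // entry read back from hbase:acl in the log. Null family/qualifier means
      // the grant applies to the whole table.
      AccessControlClient.grant(conn,
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
    }
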
2024-11-28T00:29:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@166c5635, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:29:45,668 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:29:45,669 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:29:45,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:29:45,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:29:45,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ce6819, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:45,669 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:29:45,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:29:45,670 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:45,671 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48502, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:29:45,671 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7935f54b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:45,672 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:29:45,672 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:29:45,673 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:45,674 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:29:45,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:29:45,675 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:45,676 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39978, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:29:45,677 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:29:45,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:29:45,677 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:45,677 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:45,678 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:29:45,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-28T00:29:45,678 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:29:45,679 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-28T00:29:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-28T00:29:45,680 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-28T00:29:45,680 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:29:45,681 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:29:45,684 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:29:45,691 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742179_1355 (size=215) 2024-11-28T00:29:45,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742179_1355 (size=215) 2024-11-28T00:29:45,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742179_1355 (size=215) 2024-11-28T00:29:45,697 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:29:45,697 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure daf7088603a9f84f2be699938b8bcdb0}, {pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c}] 2024-11-28T00:29:45,698 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,698 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,791 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-28T00:29:45,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=139 2024-11-28T00:29:45,850 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=138 2024-11-28T00:29:45,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:29:45,850 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.HRegion(2603): Flush status journal for a29845d15726dd0a5b11d9ae28fd4c0c: 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.HRegion(2603): Flush status journal for daf7088603a9f84f2be699938b8bcdb0: 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. for emptySnaptb0-testExportFileSystemStateWithMergeRegion completed. 
2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:29:45,851 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:29:45,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742181_1357 (size=86) 2024-11-28T00:29:45,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742181_1357 (size=86) 2024-11-28T00:29:45,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742180_1356 (size=86) 2024-11-28T00:29:45,866 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 
2024-11-28T00:29:45,866 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=138}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=138 2024-11-28T00:29:45,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742181_1357 (size=86) 2024-11-28T00:29:45,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742180_1356 (size=86) 2024-11-28T00:29:45,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742180_1356 (size=86) 2024-11-28T00:29:45,867 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=138 2024-11-28T00:29:45,867 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,867 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=138, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:45,869 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=138, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure daf7088603a9f84f2be699938b8bcdb0 in 171 msec 2024-11-28T00:29:45,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 
2024-11-28T00:29:45,872 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=139}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=139 2024-11-28T00:29:45,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=139 2024-11-28T00:29:45,873 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithMergeRegion on region a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,873 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=139, ppid=137, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:45,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=139, resume processing ppid=137 2024-11-28T00:29:45,875 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=139, ppid=137, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c in 177 msec 2024-11-28T00:29:45,875 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:29:45,876 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:29:45,877 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:29:45,878 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:29:45,878 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:45,878 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:29:45,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742182_1358 (size=78) 2024-11-28T00:29:45,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742182_1358 (size=78) 2024-11-28T00:29:45,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742182_1358 (size=78) 2024-11-28T00:29:45,920 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:29:45,920 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:45,921 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:45,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742183_1359 (size=713) 2024-11-28T00:29:45,950 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742183_1359 (size=713) 2024-11-28T00:29:45,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742183_1359 (size=713) 2024-11-28T00:29:45,954 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:29:45,969 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:29:45,969 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithMergeRegion to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:45,972 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=137, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:29:45,972 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 137 2024-11-28T00:29:45,976 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=137, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=137, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 293 msec 2024-11-28T00:29:46,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=137 2024-11-28T00:29:46,001 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:29:46,010 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:29:46,012 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41621 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:29:46,014 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:29:46,018 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:46,018 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 
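
The "writing data to region ... with WAL disabled. Data may be lost in the event of a crash" warnings above are emitted when mutations arrive with write-ahead logging skipped. A client-side sketch of such a write, assuming "conn" plus illustrative row, qualifier and value bytes:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.Durability;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    static void putWithoutWal(Connection conn) throws Exception {
      try (Table table = conn.getTable(
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"))) {
        Put put = new Put(Bytes.toBytes("row-0"))
            .addColumn(Bytes.toBytes("cf"), Bytes.toBytes("q"), Bytes.toBytes("value"));
        put.setDurability(Durability.SKIP_WAL);  // triggers the server-side warning above
        table.put(put);
      }
    }
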
2024-11-28T00:29:46,018 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:29:46,020 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:29:46,026 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:29:46,032 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion ,, for max=2147483647 with caching=100 2024-11-28T00:29:46,035 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-28T00:29:46,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753786035 (current time:1732753786035). 2024-11-28T00:29:46,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:29:46,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion VERSION not specified, setting to 2 2024-11-28T00:29:46,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:29:46,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1848fdd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:46,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:29:46,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:29:46,042 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:29:46,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:29:46,042 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:29:46,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@645823f5, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:46,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:29:46,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:29:46,043 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:46,045 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48522, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:29:46,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6e995786, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:46,046 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:29:46,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:29:46,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:46,049 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55650, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:29:46,051 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
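
At this point the empty snapshot has completed (pid=137 above) and a second FLUSH snapshot, snaptb0-testExportFileSystemStateWithMergeRegion, is being validated. Completed snapshots can be listed from any client; a brief sketch, again assuming "conn":

    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.SnapshotDescription;

    static void printSnapshots(Connection conn) throws Exception {
      try (Admin admin = conn.getAdmin()) {
        for (SnapshotDescription sd : admin.listSnapshots()) {
          System.out.println(sd.getName() + " table=" + sd.getTableNameAsString());
        }
      }
    }
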
2024-11-28T00:29:46,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:29:46,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:46,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:46,051 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:29:46,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@21f29f69, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:46,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:29:46,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:29:46,053 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:29:46,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:29:46,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:29:46,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fd692a6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:46,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:29:46,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:29:46,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:46,055 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:29:46,056 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b1ef739, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:46,057 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:29:46,058 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:29:46,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:46,060 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55654, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:29:46,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:29:46,062 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:46,063 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39992, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:29:46,064 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:29:46,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:29:46,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:46,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:46,064 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:29:46,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion], kv [jenkins: RWXCA] 2024-11-28T00:29:46,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
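For reference, the snapshot request being validated above is what a client issues through the HBase Admin API. A minimal illustrative sketch of the equivalent call, reusing the snapshot and table names from this log (the configuration lookup and the main-method wrapper are assumptions, not part of the test):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class SnapshotRequestSketch {
  public static void main(String[] args) throws Exception {
    // Assumed: an hbase-site.xml pointing at the cluster is on the classpath.
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Matches "ss=snaptb0-... table=testtb-... type=FLUSH" in the request logged above;
      // the call blocks until the master's SnapshotProcedure (pid=140 here) finishes.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion",
          TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion"),
          SnapshotType.FLUSH);
    }
  }
}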
2024-11-28T00:29:46,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } 2024-11-28T00:29:46,067 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-28T00:29:46,068 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:29:46,069 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-28T00:29:46,069 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:29:46,071 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:29:46,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742184_1360 (size=210) 2024-11-28T00:29:46,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742184_1360 (size=210) 2024-11-28T00:29:46,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742184_1360 (size=210) 2024-11-28T00:29:46,090 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:29:46,091 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure daf7088603a9f84f2be699938b8bcdb0}, {pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c}] 2024-11-28T00:29:46,092 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:46,092 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:46,181 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-28T00:29:46,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=141 2024-11-28T00:29:46,244 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=142 2024-11-28T00:29:46,244 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:29:46,244 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:29:46,245 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2902): Flushing a29845d15726dd0a5b11d9ae28fd4c0c 1/1 column families, dataSize=3.00 KB heapSize=6.72 KB 2024-11-28T00:29:46,245 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2902): Flushing daf7088603a9f84f2be699938b8bcdb0 1/1 column families, dataSize=266 B heapSize=832 B 2024-11-28T00:29:46,293 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411283688418a3a9b4e07918f70bbb28d5441_daf7088603a9f84f2be699938b8bcdb0 is 71, key is 05651f1ee3a531ee906bac0badc34dfc/cf:q/1732753786010/Put/seqid=0 2024-11-28T00:29:46,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742185_1361 (size=5171) 2024-11-28T00:29:46,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742185_1361 (size=5171) 2024-11-28T00:29:46,308 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128c12def9147044b08905f5f3930cb676e_a29845d15726dd0a5b11d9ae28fd4c0c is 71, key is 1597d93da3b0fcfb46908b60b2beda16/cf:q/1732753786012/Put/seqid=0 2024-11-28T00:29:46,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742185_1361 (size=5171) 2024-11-28T00:29:46,309 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:46,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742186_1362 (size=8102) 2024-11-28T00:29:46,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742186_1362 (size=8102) 2024-11-28T00:29:46,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742186_1362 (size=8102) 2024-11-28T00:29:46,315 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411283688418a3a9b4e07918f70bbb28d5441_daf7088603a9f84f2be699938b8bcdb0 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411283688418a3a9b4e07918f70bbb28d5441_daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:46,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:46,316 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/.tmp/cf/a581c0998e50422583e8b5bccb7aaf0d, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=daf7088603a9f84f2be699938b8bcdb0] 2024-11-28T00:29:46,317 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/.tmp/cf/a581c0998e50422583e8b5bccb7aaf0d is 224, key is 0b4c2003c6f053b2bfad5399069affbda/cf:q/1732753786010/Put/seqid=0 2024-11-28T00:29:46,320 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128c12def9147044b08905f5f3930cb676e_a29845d15726dd0a5b11d9ae28fd4c0c to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241128c12def9147044b08905f5f3930cb676e_a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:46,321 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(263): Flush store file: 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/.tmp/cf/a91709a9e898417aa77de1246eb2929d, store: [table=testtb-testExportFileSystemStateWithMergeRegion family=cf region=a29845d15726dd0a5b11d9ae28fd4c0c] 2024-11-28T00:29:46,322 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/.tmp/cf/a91709a9e898417aa77de1246eb2929d is 224, key is 14757d6140ff526bc74c87c9f234ff01c/cf:q/1732753786012/Put/seqid=0 2024-11-28T00:29:46,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742187_1363 (size=6196) 2024-11-28T00:29:46,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742187_1363 (size=6196) 2024-11-28T00:29:46,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742187_1363 (size=6196) 2024-11-28T00:29:46,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742188_1364 (size=15499) 2024-11-28T00:29:46,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742188_1364 (size=15499) 2024-11-28T00:29:46,372 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.0 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/.tmp/cf/a91709a9e898417aa77de1246eb2929d 2024-11-28T00:29:46,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742188_1364 (size=15499) 2024-11-28T00:29:46,379 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/.tmp/cf/a91709a9e898417aa77de1246eb2929d as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/cf/a91709a9e898417aa77de1246eb2929d 2024-11-28T00:29:46,387 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/cf/a91709a9e898417aa77de1246eb2929d, entries=46, sequenceid=6, filesize=15.1 K 2024-11-28T00:29:46,388 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(3140): Finished flush of 
dataSize ~3.00 KB/3070, heapSize ~6.70 KB/6864, currentSize=0 B/0 for a29845d15726dd0a5b11d9ae28fd4c0c in 143ms, sequenceid=6, compaction requested=false 2024-11-28T00:29:46,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion' 2024-11-28T00:29:46,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.HRegion(2603): Flush status journal for a29845d15726dd0a5b11d9ae28fd4c0c: 2024-11-28T00:29:46,389 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-28T00:29:46,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:46,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:29:46,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/cf/a91709a9e898417aa77de1246eb2929d] hfiles 2024-11-28T00:29:46,390 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/cf/a91709a9e898417aa77de1246eb2929d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:46,391 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-28T00:29:46,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742189_1365 (size=125) 2024-11-28T00:29:46,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742189_1365 (size=125) 2024-11-28T00:29:46,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742189_1365 (size=125) 2024-11-28T00:29:46,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 
2024-11-28T00:29:46,453 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=142}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=142 2024-11-28T00:29:46,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=142 2024-11-28T00:29:46,454 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:46,455 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=142, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:46,457 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=142, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c in 365 msec 2024-11-28T00:29:46,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-28T00:29:46,742 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=266, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/.tmp/cf/a581c0998e50422583e8b5bccb7aaf0d 2024-11-28T00:29:46,754 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/.tmp/cf/a581c0998e50422583e8b5bccb7aaf0d as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/cf/a581c0998e50422583e8b5bccb7aaf0d 2024-11-28T00:29:46,763 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/cf/a581c0998e50422583e8b5bccb7aaf0d, entries=4, sequenceid=6, filesize=6.1 K 2024-11-28T00:29:46,765 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(3140): Finished flush of dataSize ~266 B/266, heapSize ~816 B/816, currentSize=0 B/0 for daf7088603a9f84f2be699938b8bcdb0 in 520ms, sequenceid=6, compaction requested=false 2024-11-28T00:29:46,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.HRegion(2603): Flush status journal for daf7088603a9f84f2be699938b8bcdb0: 2024-11-28T00:29:46,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 
for snaptb0-testExportFileSystemStateWithMergeRegion completed. 2024-11-28T00:29:46,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.' region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:46,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:29:46,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/cf/a581c0998e50422583e8b5bccb7aaf0d] hfiles 2024-11-28T00:29:46,765 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/cf/a581c0998e50422583e8b5bccb7aaf0d for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:46,792 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742190_1366 (size=125) 2024-11-28T00:29:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742190_1366 (size=125) 2024-11-28T00:29:46,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742190_1366 (size=125) 2024-11-28T00:29:46,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 
2024-11-28T00:29:46,795 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=141}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=141 2024-11-28T00:29:46,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=141 2024-11-28T00:29:46,796 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion on region daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:46,796 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=141, ppid=140, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:46,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=141, resume processing ppid=140 2024-11-28T00:29:46,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=141, ppid=140, state=SUCCESS, hasLock=false; SnapshotRegionProcedure daf7088603a9f84f2be699938b8bcdb0 in 706 msec 2024-11-28T00:29:46,800 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:29:46,801 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:29:46,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:29:46,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:29:46,803 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:46,805 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241128c12def9147044b08905f5f3930cb676e_a29845d15726dd0a5b11d9ae28fd4c0c, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411283688418a3a9b4e07918f70bbb28d5441_daf7088603a9f84f2be699938b8bcdb0] hfiles 2024-11-28T00:29:46,805 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241128c12def9147044b08905f5f3930cb676e_a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:29:46,805 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411283688418a3a9b4e07918f70bbb28d5441_daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:29:46,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742191_1367 (size=309) 2024-11-28T00:29:46,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742191_1367 (size=309) 2024-11-28T00:29:46,832 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742191_1367 (size=309) 2024-11-28T00:29:46,834 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:29:46,834 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:46,835 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:46,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742192_1368 (size=1023) 2024-11-28T00:29:46,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43159 is added to blk_1073742192_1368 (size=1023) 2024-11-28T00:29:46,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742192_1368 (size=1023) 2024-11-28T00:29:46,861 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:29:46,869 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:29:46,869 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:29:46,871 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=140, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:29:46,871 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 }, snapshot procedure id = 140 2024-11-28T00:29:46,872 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=140, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=140, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion table=testtb-testExportFileSystemStateWithMergeRegion type=FLUSH ttl=0 } in 805 msec 2024-11-28T00:29:47,211 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=140 2024-11-28T00:29:47,211 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:29:47,213 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:29:47,213 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:29:47,215 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55656, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:29:47,215 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-28T00:29:47,216 INFO 
[MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40004, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:29:47,218 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54894, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-28T00:29:47,220 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithMergeRegion-1', {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:29:47,221 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:47,222 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:29:47,222 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:47,222 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithMergeRegion-1" procId is: 143 2024-11-28T00:29:47,223 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:29:47,223 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-28T00:29:47,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742193_1369 (size=399) 2024-11-28T00:29:47,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742193_1369 (size=399) 2024-11-28T00:29:47,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742193_1369 (size=399) 2024-11-28T00:29:47,253 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => fee8516c778b886537c19942d5a2d0cb, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb.', STARTKEY => '2', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:29:47,253 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a0fc42ec4643c853998d79771fbf60cc, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc.', STARTKEY => '', ENDKEY => '2'}, tableDescriptor='testtb-testExportFileSystemStateWithMergeRegion-1', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:29:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742194_1370 (size=85) 2024-11-28T00:29:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742195_1371 (size=85) 2024-11-28T00:29:47,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742194_1370 (size=85) 2024-11-28T00:29:47,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742195_1371 (size=85) 2024-11-28T00:29:47,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742194_1370 (size=85) 2024-11-28T00:29:47,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742195_1371 (size=85) 2024-11-28T00:29:47,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:47,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1722): Closing a0fc42ec4643c853998d79771fbf60cc, disabling compactions & flushes 2024-11-28T00:29:47,288 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:47,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 
2024-11-28T00:29:47,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. after waiting 0 ms 2024-11-28T00:29:47,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:47,288 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:47,288 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-0 {}] regionserver.HRegion(1676): Region close journal for a0fc42ec4643c853998d79771fbf60cc: Waiting for close lock at 1732753787288Disabling compacts and flushes for region at 1732753787288Disabling writes for close at 1732753787288Writing region close event to WAL at 1732753787288Closed at 1732753787288 2024-11-28T00:29:47,289 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:47,289 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1722): Closing fee8516c778b886537c19942d5a2d0cb, disabling compactions & flushes 2024-11-28T00:29:47,289 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:47,289 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:47,289 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. after waiting 0 ms 2024-11-28T00:29:47,289 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:47,289 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 
2024-11-28T00:29:47,289 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithMergeRegion-1-pool-1 {}] regionserver.HRegion(1676): Region close journal for fee8516c778b886537c19942d5a2d0cb: Waiting for close lock at 1732753787289Disabling compacts and flushes for region at 1732753787289Disabling writes for close at 1732753787289Writing region close event to WAL at 1732753787289Closed at 1732753787289 2024-11-28T00:29:47,290 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:29:47,290 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732753787290"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753787290"}]},"ts":"1732753787290"} 2024-11-28T00:29:47,291 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb.","families":{"info":[{"qualifier":"regioninfo","vlen":84,"tag":[],"timestamp":"1732753787290"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753787290"}]},"ts":"1732753787290"} 2024-11-28T00:29:47,293 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:29:47,294 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:29:47,295 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753787294"}]},"ts":"1732753787294"} 2024-11-28T00:29:47,296 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLING in hbase:meta 2024-11-28T00:29:47,296 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:29:47,298 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:29:47,298 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:29:47,298 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:29:47,298 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:29:47,298 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:29:47,298 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:29:47,298 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:29:47,298 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:29:47,298 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:29:47,298 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:29:47,299 INFO [PEWorker-5 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, ASSIGN}, {pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, ASSIGN}] 2024-11-28T00:29:47,300 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, ASSIGN 2024-11-28T00:29:47,300 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, ASSIGN 2024-11-28T00:29:47,301 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:29:47,301 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:29:47,331 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-28T00:29:47,452 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
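The table created in this stretch of the log ('testtb-testExportFileSystemStateWithMergeRegion-1', a single 'cf' family, one split at rowkey '2' producing the two regions being assigned above) corresponds to a plain Admin call. A hedged sketch using the current client API, with the table name, family, and split key taken from the logged descriptor and everything else left at defaults (the connection boilerplate is an assumption):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMaxVersions(1)     // VERSIONS => '1' in the logged descriptor
              .setBlocksize(65536)   // BLOCKSIZE => '65536 B (64KB)'
              .build())
          .build();
      // One split key ('2') yields the two regions seen in the log:
      // [''..'2') -> a0fc42ec4643c853998d79771fbf60cc and ['2'..'') -> fee8516c778b886537c19942d5a2d0cb.
      admin.createTable(desc, new byte[][] { Bytes.toBytes("2") });
    }
  }
}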
2024-11-28T00:29:47,452 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=fee8516c778b886537c19942d5a2d0cb, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:29:47,452 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=a0fc42ec4643c853998d79771fbf60cc, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:29:47,455 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=144, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, ASSIGN because future has completed 2024-11-28T00:29:47,455 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure a0fc42ec4643c853998d79771fbf60cc, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:29:47,456 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=145, ppid=143, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, ASSIGN because future has completed 2024-11-28T00:29:47,457 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure fee8516c778b886537c19942d5a2d0cb, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:29:47,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-28T00:29:47,611 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:47,611 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7752): Opening region: {ENCODED => a0fc42ec4643c853998d79771fbf60cc, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc.', STARTKEY => '', ENDKEY => '2'} 2024-11-28T00:29:47,612 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. service=AccessControlService 2024-11-28T00:29:47,612 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:29:47,612 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,612 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:47,612 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:47,612 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7752): Opening region: {ENCODED => fee8516c778b886537c19942d5a2d0cb, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb.', STARTKEY => '2', ENDKEY => ''} 2024-11-28T00:29:47,612 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7794): checking encryption for a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,613 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(7797): checking classloading for a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,613 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. service=AccessControlService 2024-11-28T00:29:47,613 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:29:47,613 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,613 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:47,613 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7794): checking encryption for fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,613 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(7797): checking classloading for fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,614 INFO [StoreOpener-a0fc42ec4643c853998d79771fbf60cc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,614 INFO [StoreOpener-fee8516c778b886537c19942d5a2d0cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,616 INFO [StoreOpener-a0fc42ec4643c853998d79771fbf60cc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a0fc42ec4643c853998d79771fbf60cc columnFamilyName cf 2024-11-28T00:29:47,616 INFO [StoreOpener-fee8516c778b886537c19942d5a2d0cb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region fee8516c778b886537c19942d5a2d0cb columnFamilyName cf 2024-11-28T00:29:47,616 DEBUG [StoreOpener-fee8516c778b886537c19942d5a2d0cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:47,616 DEBUG [StoreOpener-a0fc42ec4643c853998d79771fbf60cc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:47,616 INFO [StoreOpener-a0fc42ec4643c853998d79771fbf60cc-1 {}] regionserver.HStore(327): Store=a0fc42ec4643c853998d79771fbf60cc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:29:47,616 INFO [StoreOpener-fee8516c778b886537c19942d5a2d0cb-1 {}] regionserver.HStore(327): Store=fee8516c778b886537c19942d5a2d0cb/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:29:47,616 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1038): replaying wal for fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,616 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1038): replaying wal for a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,617 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,617 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,618 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,618 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,618 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1048): stopping wal replay for fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,618 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1060): Cleaning up temporary data for fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,618 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1048): stopping wal replay for a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,618 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1060): Cleaning up temporary data for a0fc42ec4643c853998d79771fbf60cc 
2024-11-28T00:29:47,620 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1093): writing seq id for fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,620 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1093): writing seq id for a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,622 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:29:47,622 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:29:47,622 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1114): Opened fee8516c778b886537c19942d5a2d0cb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68136576, jitterRate=0.015314102172851562}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:29:47,623 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1122): Running coprocessor post-open hooks for fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:47,622 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1114): Opened a0fc42ec4643c853998d79771fbf60cc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72660378, jitterRate=0.08272400498390198}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:29:47,623 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:47,623 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegion(1006): Region open journal for a0fc42ec4643c853998d79771fbf60cc: Running coprocessor pre-open hook at 1732753787613Writing region info on filesystem at 1732753787613Initializing all the Stores at 1732753787614 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753787614Cleaning up temporary data from old regions at 1732753787618 (+4 ms)Running coprocessor post-open hooks at 1732753787623 (+5 ms)Region opened successfully at 1732753787623 2024-11-28T00:29:47,623 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegion(1006): Region open journal for fee8516c778b886537c19942d5a2d0cb: Running 
coprocessor pre-open hook at 1732753787613Writing region info on filesystem at 1732753787613Initializing all the Stores at 1732753787614 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753787614Cleaning up temporary data from old regions at 1732753787618 (+4 ms)Running coprocessor post-open hooks at 1732753787623 (+5 ms)Region opened successfully at 1732753787623 2024-11-28T00:29:47,624 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc., pid=146, masterSystemTime=1732753787608 2024-11-28T00:29:47,625 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb., pid=147, masterSystemTime=1732753787610 2024-11-28T00:29:47,627 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:47,627 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=146}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:47,627 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=144 updating hbase:meta row=a0fc42ec4643c853998d79771fbf60cc, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:29:47,627 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:47,628 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=147}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 
2024-11-28T00:29:47,632 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=145 updating hbase:meta row=fee8516c778b886537c19942d5a2d0cb, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:29:47,632 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=146, ppid=144, state=RUNNABLE, hasLock=false; OpenRegionProcedure a0fc42ec4643c853998d79771fbf60cc, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:29:47,634 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=147, ppid=145, state=RUNNABLE, hasLock=false; OpenRegionProcedure fee8516c778b886537c19942d5a2d0cb, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:29:47,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=146, resume processing ppid=144 2024-11-28T00:29:47,636 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=146, ppid=144, state=SUCCESS, hasLock=false; OpenRegionProcedure a0fc42ec4643c853998d79771fbf60cc, server=72c8c7f04009,41621,1732753506207 in 178 msec 2024-11-28T00:29:47,637 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=144, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, ASSIGN in 338 msec 2024-11-28T00:29:47,639 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=147, resume processing ppid=145 2024-11-28T00:29:47,639 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=147, ppid=145, state=SUCCESS, hasLock=false; OpenRegionProcedure fee8516c778b886537c19942d5a2d0cb, server=72c8c7f04009,36427,1732753506332 in 181 msec 2024-11-28T00:29:47,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=145, resume processing ppid=143 2024-11-28T00:29:47,641 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=145, ppid=143, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, ASSIGN in 341 msec 2024-11-28T00:29:47,642 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:29:47,642 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753787642"}]},"ts":"1732753787642"} 2024-11-28T00:29:47,644 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=ENABLED in hbase:meta 2024-11-28T00:29:47,645 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=143, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:29:47,645 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithMergeRegion-1 jenkins: RWXCA 2024-11-28T00:29:47,649 DEBUG 
[RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-28T00:29:47,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:47,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:47,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:47,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:29:47,653 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:47,653 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:47,653 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:47,653 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:47,654 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:47,654 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:47,654 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:47,654 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF\x0AW\x0A\x07jenkins\x12L\x08\x03"H\x0A<\x0A\x07default\x121testtb-testExportFileSystemStateWithMergeRegion-1 \x00 \x01 \x02 \x03 \x04 2024-11-28T00:29:47,655 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=143, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 433 msec 2024-11-28T00:29:47,851 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=143 2024-11-28T00:29:47,852 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:29:47,860 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='1', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:29:47,866 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'testtb-testExportFileSystemStateWithMergeRegion-1', row='2', locateType=CURRENT is [region=testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb., hostname=72c8c7f04009,36427,1732753506332, seqNum=2] 2024-11-28T00:29:47,870 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithMergeRegion-1,, stopping at row=testtb-testExportFileSystemStateWithMergeRegion-1 ,, for max=2147483647 with caching=100 2024-11-28T00:29:47,887 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$2(2278): Client=jenkins//172.17.0.2 merge regions [a0fc42ec4643c853998d79771fbf60cc, fee8516c778b886537c19942d5a2d0cb] 2024-11-28T00:29:47,896 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[a0fc42ec4643c853998d79771fbf60cc, fee8516c778b886537c19942d5a2d0cb], force=true 2024-11-28T00:29:47,897 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[a0fc42ec4643c853998d79771fbf60cc, fee8516c778b886537c19942d5a2d0cb], force=true 2024-11-28T00:29:47,897 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[a0fc42ec4643c853998d79771fbf60cc, fee8516c778b886537c19942d5a2d0cb], force=true 2024-11-28T00:29:47,897 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=148, state=RUNNABLE:MERGE_TABLE_REGIONS_PREPARE, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[a0fc42ec4643c853998d79771fbf60cc, fee8516c778b886537c19942d5a2d0cb], 
force=true 2024-11-28T00:29:47,900 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:29:47,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, UNASSIGN}, {pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, UNASSIGN}] 2024-11-28T00:29:47,908 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, UNASSIGN 2024-11-28T00:29:47,908 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, UNASSIGN 2024-11-28T00:29:47,910 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=fee8516c778b886537c19942d5a2d0cb, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:29:47,910 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=a0fc42ec4643c853998d79771fbf60cc, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:29:47,913 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=150, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, UNASSIGN because future has completed 2024-11-28T00:29:47,914 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-28T00:29:47,914 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure fee8516c778b886537c19942d5a2d0cb, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:29:47,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=149, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, UNASSIGN because future has completed 2024-11-28T00:29:47,916 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-28T00:29:47,916 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure a0fc42ec4643c853998d79771fbf60cc, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:29:48,012 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done 
pid=148 2024-11-28T00:29:48,070 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(122): Close fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:48,070 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-28T00:29:48,070 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1722): Closing fee8516c778b886537c19942d5a2d0cb, disabling compactions & flushes 2024-11-28T00:29:48,070 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:48,070 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:48,070 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. after waiting 0 ms 2024-11-28T00:29:48,070 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:48,070 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(2902): Flushing fee8516c778b886537c19942d5a2d0cb 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-28T00:29:48,072 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(122): Close a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:48,073 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-28T00:29:48,073 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1722): Closing a0fc42ec4643c853998d79771fbf60cc, disabling compactions & flushes 2024-11-28T00:29:48,073 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:48,073 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:48,073 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 
after waiting 0 ms 2024-11-28T00:29:48,073 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:48,073 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(2902): Flushing a0fc42ec4643c853998d79771fbf60cc 1/1 column families, dataSize=24 B heapSize=352 B 2024-11-28T00:29:48,100 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/.tmp/cf/a65e7e311a724ca4b478e52cb39d787f is 28, key is 2/cf:/1732753787868/Put/seqid=0 2024-11-28T00:29:48,107 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/.tmp/cf/413b5603977f484196d05f3c114a760a is 28, key is 1/cf:/1732753787862/Put/seqid=0 2024-11-28T00:29:48,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742196_1372 (size=4945) 2024-11-28T00:29:48,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742196_1372 (size=4945) 2024-11-28T00:29:48,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742196_1372 (size=4945) 2024-11-28T00:29:48,122 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/.tmp/cf/a65e7e311a724ca4b478e52cb39d787f 2024-11-28T00:29:48,130 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/.tmp/cf/a65e7e311a724ca4b478e52cb39d787f as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/cf/a65e7e311a724ca4b478e52cb39d787f 2024-11-28T00:29:48,136 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/cf/a65e7e311a724ca4b478e52cb39d787f, entries=1, sequenceid=5, filesize=4.8 K 2024-11-28T00:29:48,138 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(3140): 
Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for fee8516c778b886537c19942d5a2d0cb in 68ms, sequenceid=5, compaction requested=false 2024-11-28T00:29:48,138 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithMergeRegion-1' 2024-11-28T00:29:48,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742197_1373 (size=4945) 2024-11-28T00:29:48,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742197_1373 (size=4945) 2024-11-28T00:29:48,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742197_1373 (size=4945) 2024-11-28T00:29:48,184 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24 B at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/.tmp/cf/413b5603977f484196d05f3c114a760a 2024-11-28T00:29:48,189 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/.tmp/cf/413b5603977f484196d05f3c114a760a as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/cf/413b5603977f484196d05f3c114a760a 2024-11-28T00:29:48,194 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/cf/413b5603977f484196d05f3c114a760a, entries=1, sequenceid=5, filesize=4.8 K 2024-11-28T00:29:48,196 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(3140): Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for a0fc42ec4643c853998d79771fbf60cc in 122ms, sequenceid=5, compaction requested=false 2024-11-28T00:29:48,200 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:29:48,201 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:29:48,201 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1973): Closed 
testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. 2024-11-28T00:29:48,201 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] regionserver.HRegion(1676): Region close journal for fee8516c778b886537c19942d5a2d0cb: Waiting for close lock at 1732753788070Running coprocessor pre-close hooks at 1732753788070Disabling compacts and flushes for region at 1732753788070Disabling writes for close at 1732753788070Obtaining lock to block concurrent updates at 1732753788070Preparing flush snapshotting stores in fee8516c778b886537c19942d5a2d0cb at 1732753788070Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732753788071 (+1 ms)Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb. at 1732753788074 (+3 ms)Flushing fee8516c778b886537c19942d5a2d0cb/cf: creating writer at 1732753788075 (+1 ms)Flushing fee8516c778b886537c19942d5a2d0cb/cf: appending metadata at 1732753788099 (+24 ms)Flushing fee8516c778b886537c19942d5a2d0cb/cf: closing flushed file at 1732753788099Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b4d8b59: reopening flushed file at 1732753788129 (+30 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for fee8516c778b886537c19942d5a2d0cb in 68ms, sequenceid=5, compaction requested=false at 1732753788138 (+9 ms)Writing region close event to WAL at 1732753788168 (+30 ms)Running coprocessor post-close hooks at 1732753788201 (+33 ms)Closed at 1732753788201 2024-11-28T00:29:48,203 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=151}] handler.UnassignRegionHandler(157): Closed fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:29:48,204 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=150 updating hbase:meta row=fee8516c778b886537c19942d5a2d0cb, regionState=CLOSED 2024-11-28T00:29:48,206 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=151, ppid=150, state=RUNNABLE, hasLock=false; CloseRegionProcedure fee8516c778b886537c19942d5a2d0cb, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:29:48,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=151, resume processing ppid=150 2024-11-28T00:29:48,212 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=151, ppid=150, state=SUCCESS, hasLock=false; CloseRegionProcedure fee8516c778b886537c19942d5a2d0cb, server=72c8c7f04009,36427,1732753506332 in 297 msec 2024-11-28T00:29:48,213 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=150, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=fee8516c778b886537c19942d5a2d0cb, UNASSIGN in 306 msec 2024-11-28T00:29:48,220 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:29:48,224 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:29:48,225 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:29:48,225 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. 2024-11-28T00:29:48,225 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] regionserver.HRegion(1676): Region close journal for a0fc42ec4643c853998d79771fbf60cc: Waiting for close lock at 1732753788073Running coprocessor pre-close hooks at 1732753788073Disabling compacts and flushes for region at 1732753788073Disabling writes for close at 1732753788073Obtaining lock to block concurrent updates at 1732753788073Preparing flush snapshotting stores in a0fc42ec4643c853998d79771fbf60cc at 1732753788073Finished memstore snapshotting testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc., syncing WAL and waiting on mvcc, flushsize=dataSize=24, getHeapSize=336, getOffHeapSize=0, getCellsCount=1 at 1732753788073Flushing stores of testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc. at 1732753788074 (+1 ms)Flushing a0fc42ec4643c853998d79771fbf60cc/cf: creating writer at 1732753788075 (+1 ms)Flushing a0fc42ec4643c853998d79771fbf60cc/cf: appending metadata at 1732753788106 (+31 ms)Flushing a0fc42ec4643c853998d79771fbf60cc/cf: closing flushed file at 1732753788106Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2cf483e8: reopening flushed file at 1732753788189 (+83 ms)Finished flush of dataSize ~24 B/24, heapSize ~336 B/336, currentSize=0 B/0 for a0fc42ec4643c853998d79771fbf60cc in 122ms, sequenceid=5, compaction requested=false at 1732753788196 (+7 ms)Writing region close event to WAL at 1732753788216 (+20 ms)Running coprocessor post-close hooks at 1732753788225 (+9 ms)Closed at 1732753788225 2024-11-28T00:29:48,227 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=152}] handler.UnassignRegionHandler(157): Closed a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:29:48,227 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=149 updating hbase:meta row=a0fc42ec4643c853998d79771fbf60cc, regionState=CLOSED 2024-11-28T00:29:48,229 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=152, ppid=149, state=RUNNABLE, hasLock=false; CloseRegionProcedure a0fc42ec4643c853998d79771fbf60cc, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:29:48,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=152, resume processing ppid=149 2024-11-28T00:29:48,231 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=152, ppid=149, state=SUCCESS, hasLock=false; CloseRegionProcedure a0fc42ec4643c853998d79771fbf60cc, server=72c8c7f04009,41621,1732753506207 in 314 msec 2024-11-28T00:29:48,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=149, resume 
processing ppid=148 2024-11-28T00:29:48,233 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=149, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=a0fc42ec4643c853998d79771fbf60cc, UNASSIGN in 325 msec 2024-11-28T00:29:48,257 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742198_1374 (size=84) 2024-11-28T00:29:48,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742198_1374 (size=84) 2024-11-28T00:29:48,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742198_1374 (size=84) 2024-11-28T00:29:48,260 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:48,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742199_1375 (size=20) 2024-11-28T00:29:48,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742199_1375 (size=20) 2024-11-28T00:29:48,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742199_1375 (size=20) 2024-11-28T00:29:48,282 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:48,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742200_1376 (size=21) 2024-11-28T00:29:48,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742200_1376 (size=21) 2024-11-28T00:29:48,297 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742200_1376 (size=21) 2024-11-28T00:29:48,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742201_1377 (size=84) 2024-11-28T00:29:48,328 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742201_1377 (size=84) 2024-11-28T00:29:48,329 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742201_1377 (size=84) 2024-11-28T00:29:48,330 DEBUG [PEWorker-2 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:48,345 DEBUG [PEWorker-2 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=-1 2024-11-28T00:29:48,348 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete 
{"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787219.a0fc42ec4643c853998d79771fbf60cc.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-28T00:29:48,348 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,2,1732753787219.fee8516c778b886537c19942d5a2d0cb.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-28T00:29:48,348 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Put {"totalColumns":7,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d.","families":{"info":[{"qualifier":"regioninfo","vlen":83,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0000","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"merge0001","vlen":84,"tag":[],"timestamp":"9223372036854775807"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"9223372036854775807"}]},"ts":"9223372036854775807"} 2024-11-28T00:29:48,354 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, ASSIGN}] 2024-11-28T00:29:48,355 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, ASSIGN 2024-11-28T00:29:48,356 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, ASSIGN; state=MERGED, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:29:48,507 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-28T00:29:48,507 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=9862736ad6a4a00d92d45d3e32dfbb1d, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:29:48,510 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=153, ppid=148, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, ASSIGN because future has completed 2024-11-28T00:29:48,510 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:29:48,531 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:29:48,666 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 2024-11-28T00:29:48,666 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7752): Opening region: {ENCODED => 9862736ad6a4a00d92d45d3e32dfbb1d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:29:48,666 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. service=AccessControlService 2024-11-28T00:29:48,667 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:29:48,667 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion-1 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,667 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:29:48,667 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7794): checking encryption for 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,667 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(7797): checking classloading for 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,669 INFO [StoreOpener-9862736ad6a4a00d92d45d3e32dfbb1d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,669 INFO [StoreOpener-9862736ad6a4a00d92d45d3e32dfbb1d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9862736ad6a4a00d92d45d3e32dfbb1d columnFamilyName cf 2024-11-28T00:29:48,670 DEBUG [StoreOpener-9862736ad6a4a00d92d45d3e32dfbb1d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:29:48,680 DEBUG [StoreOpener-9862736ad6a4a00d92d45d3e32dfbb1d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/413b5603977f484196d05f3c114a760a.a0fc42ec4643c853998d79771fbf60cc->hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/cf/413b5603977f484196d05f3c114a760a-top 2024-11-28T00:29:48,686 DEBUG [StoreOpener-9862736ad6a4a00d92d45d3e32dfbb1d-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/a65e7e311a724ca4b478e52cb39d787f.fee8516c778b886537c19942d5a2d0cb->hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/cf/a65e7e311a724ca4b478e52cb39d787f-top 2024-11-28T00:29:48,686 INFO [StoreOpener-9862736ad6a4a00d92d45d3e32dfbb1d-1 {}] regionserver.HStore(327): Store=9862736ad6a4a00d92d45d3e32dfbb1d/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:29:48,687 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1038): replaying wal for 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,688 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,689 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,690 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1048): stopping wal replay for 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,690 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1060): Cleaning up temporary data for 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,693 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1093): writing seq id for 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,694 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1114): Opened 9862736ad6a4a00d92d45d3e32dfbb1d; next sequenceid=9; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=75481500, jitterRate=0.12476199865341187}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:29:48,694 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:48,695 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegion(1006): Region open journal for 9862736ad6a4a00d92d45d3e32dfbb1d: Running coprocessor pre-open hook at 1732753788667Writing region info on filesystem at 1732753788667Initializing all the Stores at 1732753788668 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 
'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753788668Cleaning up temporary data from old regions at 1732753788690 (+22 ms)Running coprocessor post-open hooks at 1732753788694 (+4 ms)Region opened successfully at 1732753788695 (+1 ms) 2024-11-28T00:29:48,696 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d., pid=154, masterSystemTime=1732753788662 2024-11-28T00:29:48,696 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.CompactSplit(342): Ignoring compaction request for testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d.,because compaction is disabled. 2024-11-28T00:29:48,698 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 2024-11-28T00:29:48,698 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=154}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 2024-11-28T00:29:48,699 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=153 updating hbase:meta row=9862736ad6a4a00d92d45d3e32dfbb1d, regionState=OPEN, openSeqNum=9, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:29:48,701 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=154, ppid=153, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:29:48,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=154, resume processing ppid=153 2024-11-28T00:29:48,704 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=154, ppid=153, state=SUCCESS, hasLock=false; OpenRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d, server=72c8c7f04009,41621,1732753506207 in 192 msec 2024-11-28T00:29:48,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=153, resume processing ppid=148 2024-11-28T00:29:48,706 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=153, ppid=148, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, ASSIGN in 350 msec 2024-11-28T00:29:48,721 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=148, state=SUCCESS, hasLock=false; MergeTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, regions=[a0fc42ec4643c853998d79771fbf60cc, fee8516c778b886537c19942d5a2d0cb], force=true in 817 msec 2024-11-28T00:29:49,042 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=148 2024-11-28T00:29:49,042 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: MERGE_REGIONS, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:29:49,043 INFO 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-28T00:29:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753789043 (current time:1732753789043). 2024-11-28T00:29:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:29:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 VERSION not specified, setting to 2 2024-11-28T00:29:49,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:29:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@b249819, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:29:49,044 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:29:49,045 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:29:49,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:29:49,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:29:49,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4697fbcf, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:49,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:29:49,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:29:49,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:49,047 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:29:49,047 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] 
ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5c5a8967, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:49,048 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:29:49,048 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:29:49,049 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:49,050 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55666, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:29:49,051 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:29:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:29:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:49,051 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:49,051 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
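MasterRpcServices(1763) records a FLUSH-type snapshot request for snaptb0-testExportFileSystemStateWithMergeRegion-1, after which SnapshotDescriptionUtils fills in the creation time, TTL, version and owner and opens short-lived connections to check security and table ACLs before validation. A hedged sketch of the client call that issues such a request; the snapshot and table names are from the log, the rest is assumed boilerplate.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotType;

public class TakeSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumed client configuration
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // A FLUSH snapshot (the "type=FLUSH ttl=0" in the log) flushes each online region
      // and records references to its store files; this call blocks until the master's
      // SnapshotProcedure completes.
      admin.snapshot("snaptb0-testExportFileSystemStateWithMergeRegion-1", table, SnapshotType.FLUSH);
    }
  }
}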
2024-11-28T00:29:49,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55b65fc7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:49,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:29:49,052 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:29:49,053 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:29:49,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:29:49,053 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:29:49,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d367d60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:49,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:29:49,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:29:49,054 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:49,055 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48582, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:29:49,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@63c29fca, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:29:49,056 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:29:49,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:29:49,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:49,057 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55678, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:29:49,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithMergeRegion-1', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:29:49,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:29:49,060 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40020, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:29:49,061 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:29:49,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:29:49,061 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:49,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:29:49,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithMergeRegion-1], kv [jenkins: RWXCA] 2024-11-28T00:29:49,062 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:29:49,062 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:29:49,063 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } 2024-11-28T00:29:49,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-28T00:29:49,064 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-28T00:29:49,064 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:29:49,066 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:29:49,068 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:29:49,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742202_1378 (size=216) 2024-11-28T00:29:49,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742202_1378 (size=216) 2024-11-28T00:29:49,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742202_1378 (size=216) 2024-11-28T00:29:49,083 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, 
id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:29:49,083 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d}] 2024-11-28T00:29:49,085 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:49,178 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-28T00:29:49,237 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=156 2024-11-28T00:29:49,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 2024-11-28T00:29:49,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.HRegion(2603): Flush status journal for 9862736ad6a4a00d92d45d3e32dfbb1d: 2024-11-28T00:29:49,237 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. for snaptb0-testExportFileSystemStateWithMergeRegion-1 completed. 2024-11-28T00:29:49,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:49,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:29:49,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/413b5603977f484196d05f3c114a760a.a0fc42ec4643c853998d79771fbf60cc->hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/cf/413b5603977f484196d05f3c114a760a-top, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/a65e7e311a724ca4b478e52cb39d787f.fee8516c778b886537c19942d5a2d0cb->hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/cf/a65e7e311a724ca4b478e52cb39d787f-top] hfiles 2024-11-28T00:29:49,238 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/413b5603977f484196d05f3c114a760a.a0fc42ec4643c853998d79771fbf60cc for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:49,239 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] snapshot.SnapshotManifest(265): Adding reference for file (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/a65e7e311a724ca4b478e52cb39d787f.fee8516c778b886537c19942d5a2d0cb for snapshot=snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:49,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742203_1379 (size=269) 2024-11-28T00:29:49,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742203_1379 (size=269) 2024-11-28T00:29:49,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742203_1379 (size=269) 2024-11-28T00:29:49,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 
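The SnapshotRegionProcedure above stores the region's region-info and references to its two hfiles (the "-top" references pointing back at the parent regions' store files) into the snapshot working directory; once every region subprocedure reports back, the master consolidates, verifies and publishes the snapshot. A sketch of a non-blocking client-side variant that polls for that completion, assuming the async snapshot API; names are from the log, the polling interval is arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class PollSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumed client configuration
    SnapshotDescription snap = new SnapshotDescription(
        "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        TableName.valueOf("testtb-testExportFileSystemStateWithMergeRegion-1"),
        SnapshotType.FLUSH);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      admin.snapshotAsync(snap);                 // returns once the procedure is submitted
      while (!admin.isSnapshotFinished(snap)) {  // mirrors the "Checking to see if procedure is done" RPCs
        Thread.sleep(200);
      }
    }
  }
}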
2024-11-28T00:29:49,256 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=156}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=156 2024-11-28T00:29:49,257 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=156 2024-11-28T00:29:49,257 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 on region 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:49,257 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=156, ppid=155, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:29:49,260 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=156, resume processing ppid=155 2024-11-28T00:29:49,261 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=156, ppid=155, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d in 176 msec 2024-11-28T00:29:49,261 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:29:49,261 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:29:49,262 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:29:49,262 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:49,263 DEBUG [PEWorker-5 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:49,303 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0006_000001 (auth:SIMPLE) from 127.0.0.1:54304 2024-11-28T00:29:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742204_1380 (size=670) 2024-11-28T00:29:49,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742204_1380 (size=670) 2024-11-28T00:29:49,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742204_1380 (size=670) 2024-11-28T00:29:49,322 INFO [PEWorker-5 {}] 
procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:29:49,328 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:29:49,329 DEBUG [PEWorker-5 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:49,330 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=155, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:29:49,331 DEBUG [PEWorker-5 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 }, snapshot procedure id = 155 2024-11-28T00:29:49,332 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=155, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=155, snapshot={ ss=snaptb0-testExportFileSystemStateWithMergeRegion-1 table=testtb-testExportFileSystemStateWithMergeRegion-1 type=FLUSH ttl=0 } in 269 msec 2024-11-28T00:29:49,333 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000001/launch_container.sh] 2024-11-28T00:29:49,333 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000001/container_tokens] 2024-11-28T00:29:49,333 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0006/container_1732753513471_0006_01_000001/sysfs] 2024-11-28T00:29:49,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] 
master.MasterRpcServices(1377): Checking to see if procedure is done pid=155 2024-11-28T00:29:49,381 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:29:49,381 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381 2024-11-28T00:29:49,382 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:29:49,433 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:29:49,433 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:49,435 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 
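TestExportSnapshot(515/542) and ExportSnapshot(1094/1095) set up the export: source root .../80fb5f5c-a5cc-3196-f9dc-1bbec52a035a, target .../export-test/export-1732753789381, skipTmp=false, after which the snapshot manifest is copied and a MapReduce job is prepared. Outside a test, the same export is normally driven through the ExportSnapshot tool; a hedged sketch of an equivalent invocation, with the snapshot name and destination path taken from the log and everything else assumed.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumed client configuration
    // Roughly equivalent command line:
    //   hbase org.apache.hadoop.hbase.snapshot.ExportSnapshot \
    //     -snapshot snaptb0-testExportFileSystemStateWithMergeRegion-1 \
    //     -copy-to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithMergeRegion-1",
        "-copy-to",
        "hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381"
    });
    System.exit(rc);
  }
}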
2024-11-28T00:29:49,444 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:49,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742205_1381 (size=216) 2024-11-28T00:29:49,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742206_1382 (size=670) 2024-11-28T00:29:49,470 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742206_1382 (size=670) 2024-11-28T00:29:49,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742205_1381 (size=216) 2024-11-28T00:29:49,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742205_1381 (size=216) 2024-11-28T00:29:49,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742206_1382 (size=670) 2024-11-28T00:29:49,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:49,474 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:49,475 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:50,224 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:29:51,040 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-7498397473055029003.jar 2024-11-28T00:29:51,040 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:51,041 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:51,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-14986084430151420066.jar 2024-11-28T00:29:51,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:51,107 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:51,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:51,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:51,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:51,108 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:29:51,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:29:51,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:29:51,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:29:51,109 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:29:51,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:29:51,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:29:51,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:29:51,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:29:51,110 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:29:51,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:29:51,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:29:51,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:29:51,111 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:29:51,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:29:51,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:29:51,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:29:51,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:29:51,112 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:29:51,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742207_1383 (size=131440) 2024-11-28T00:29:51,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742207_1383 (size=131440) 2024-11-28T00:29:51,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742207_1383 (size=131440) 2024-11-28T00:29:51,211 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742208_1384 (size=4188619) 2024-11-28T00:29:51,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742208_1384 (size=4188619) 2024-11-28T00:29:51,212 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742208_1384 (size=4188619) 2024-11-28T00:29:51,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742209_1385 (size=1323991) 2024-11-28T00:29:51,223 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742209_1385 (size=1323991) 2024-11-28T00:29:51,224 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742209_1385 (size=1323991) 2024-11-28T00:29:51,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742210_1386 (size=903927) 2024-11-28T00:29:51,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742210_1386 (size=903927) 2024-11-28T00:29:51,238 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742210_1386 (size=903927) 2024-11-28T00:29:51,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742211_1387 (size=8360083) 2024-11-28T00:29:51,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742211_1387 (size=8360083) 2024-11-28T00:29:51,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742211_1387 (size=8360083) 2024-11-28T00:29:51,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742212_1388 (size=1877034) 2024-11-28T00:29:51,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742212_1388 (size=1877034) 
2024-11-28T00:29:51,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742212_1388 (size=1877034) 2024-11-28T00:29:51,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742213_1389 (size=77835) 2024-11-28T00:29:51,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742213_1389 (size=77835) 2024-11-28T00:29:51,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742213_1389 (size=77835) 2024-11-28T00:29:51,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742214_1390 (size=30949) 2024-11-28T00:29:51,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742214_1390 (size=30949) 2024-11-28T00:29:51,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742214_1390 (size=30949) 2024-11-28T00:29:51,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742215_1391 (size=1597213) 2024-11-28T00:29:51,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742215_1391 (size=1597213) 2024-11-28T00:29:51,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742215_1391 (size=1597213) 2024-11-28T00:29:51,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742216_1392 (size=4695811) 2024-11-28T00:29:51,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742216_1392 (size=4695811) 2024-11-28T00:29:51,338 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742216_1392 (size=4695811) 2024-11-28T00:29:51,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742217_1393 (size=6424743) 2024-11-28T00:29:51,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742217_1393 (size=6424743) 2024-11-28T00:29:51,374 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742217_1393 (size=6424743) 2024-11-28T00:29:51,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742218_1394 (size=232957) 2024-11-28T00:29:51,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742218_1394 (size=232957) 2024-11-28T00:29:51,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742218_1394 (size=232957) 2024-11-28T00:29:51,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742219_1395 
(size=127628) 2024-11-28T00:29:51,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742219_1395 (size=127628) 2024-11-28T00:29:51,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742219_1395 (size=127628) 2024-11-28T00:29:51,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742220_1396 (size=20406) 2024-11-28T00:29:51,400 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742220_1396 (size=20406) 2024-11-28T00:29:51,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742220_1396 (size=20406) 2024-11-28T00:29:51,422 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742221_1397 (size=5175431) 2024-11-28T00:29:51,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742221_1397 (size=5175431) 2024-11-28T00:29:51,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742221_1397 (size=5175431) 2024-11-28T00:29:51,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742222_1398 (size=217634) 2024-11-28T00:29:51,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742222_1398 (size=217634) 2024-11-28T00:29:51,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742222_1398 (size=217634) 2024-11-28T00:29:51,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742223_1399 (size=1832290) 2024-11-28T00:29:51,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742223_1399 (size=1832290) 2024-11-28T00:29:51,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742223_1399 (size=1832290) 2024-11-28T00:29:51,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742224_1400 (size=322274) 2024-11-28T00:29:51,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742224_1400 (size=322274) 2024-11-28T00:29:51,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742224_1400 (size=322274) 2024-11-28T00:29:51,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742225_1401 (size=503880) 2024-11-28T00:29:51,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742225_1401 (size=503880) 2024-11-28T00:29:51,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to 
blk_1073742225_1401 (size=503880) 2024-11-28T00:29:51,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742226_1402 (size=440957) 2024-11-28T00:29:51,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742226_1402 (size=440957) 2024-11-28T00:29:51,468 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742226_1402 (size=440957) 2024-11-28T00:29:51,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742227_1403 (size=29229) 2024-11-28T00:29:51,474 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742227_1403 (size=29229) 2024-11-28T00:29:51,475 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742227_1403 (size=29229) 2024-11-28T00:29:51,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742228_1404 (size=24096) 2024-11-28T00:29:51,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742228_1404 (size=24096) 2024-11-28T00:29:51,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742228_1404 (size=24096) 2024-11-28T00:29:51,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742229_1405 (size=111872) 2024-11-28T00:29:51,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742229_1405 (size=111872) 2024-11-28T00:29:51,493 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742229_1405 (size=111872) 2024-11-28T00:29:51,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742230_1406 (size=45609) 2024-11-28T00:29:51,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742230_1406 (size=45609) 2024-11-28T00:29:51,500 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742230_1406 (size=45609) 2024-11-28T00:29:51,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742231_1407 (size=136454) 2024-11-28T00:29:51,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742231_1407 (size=136454) 2024-11-28T00:29:51,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742231_1407 (size=136454) 2024-11-28T00:29:51,508 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
2024-11-28T00:29:51,510 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithMergeRegion-1' hfile list 2024-11-28T00:29:51,512 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=4.8 K 2024-11-28T00:29:51,512 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=4.8 K 2024-11-28T00:29:51,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742232_1408 (size=481) 2024-11-28T00:29:51,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742232_1408 (size=481) 2024-11-28T00:29:51,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742232_1408 (size=481) 2024-11-28T00:29:51,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742233_1409 (size=21) 2024-11-28T00:29:51,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742233_1409 (size=21) 2024-11-28T00:29:51,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742233_1409 (size=21) 2024-11-28T00:29:51,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742234_1410 (size=304055) 2024-11-28T00:29:51,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742234_1410 (size=304055) 2024-11-28T00:29:51,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742234_1410 (size=304055) 2024-11-28T00:29:51,557 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:29:51,557 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:29:52,101 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0007_000001 (auth:SIMPLE) from 127.0.0.1:33632 2024-11-28T00:29:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:29:55,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 Metrics about Tables on a single HBase RegionServer 2024-11-28T00:30:00,582 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0007_000001 (auth:SIMPLE) from 127.0.0.1:37532 2024-11-28T00:30:00,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742235_1411 (size=349753) 2024-11-28T00:30:00,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742235_1411 (size=349753) 2024-11-28T00:30:00,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742235_1411 (size=349753) 2024-11-28T00:30:02,910 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0007_000001 (auth:SIMPLE) from 127.0.0.1:45134 2024-11-28T00:30:02,915 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0007_000001 (auth:SIMPLE) from 127.0.0.1:48098 2024-11-28T00:30:03,956 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-28T00:30:06,160 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=660.99 KB, freeSize=879.35 MB, max=880 MB, blockCount=0, accesses=0, hits=0, hitRatio=0, cachingAccesses=0, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-28T00:30:06,290 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=662.40 KB, freeSize=879.35 MB, max=880 MB, blockCount=2, accesses=2, hits=0, hitRatio=0, cachingAccesses=2, cachingHits=0, cachingHitsRatio=0,evictions=29, evicted=0, evictedPerRun=0.0 2024-11-28T00:30:06,364 INFO [LruBlockCacheStatsExecutor {}] hfile.LruBlockCache(1020): totalSize=880 MB, usedSize=919.14 KB, freeSize=879.10 MB, max=880 MB, blockCount=3, accesses=5, hits=2, hitRatio=40.00%, , cachingAccesses=5, cachingHits=2, cachingHitsRatio=40.00%, evictions=29, evicted=0, evictedPerRun=0.0 2024-11-28T00:30:06,494 DEBUG [master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=2, created chunk count=10, reused chunk count=14, reuseRatio=58.33% 2024-11-28T00:30:06,496 DEBUG [master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-28T00:30:07,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742236_1412 (size=4945) 2024-11-28T00:30:07,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742236_1412 (size=4945) 2024-11-28T00:30:07,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742236_1412 (size=4945) 2024-11-28T00:30:08,241 INFO [regionserver/72c8c7f04009:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-28T00:30:08,255 INFO [regionserver/72c8c7f04009:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-28T00:30:08,354 INFO [regionserver/72c8c7f04009:0.Chore.1 {}] regionserver.Replication$ReplicationStatisticsChore(208): Global stats: WAL Edits Buffer Used=0B, Limit=268435456B 2024-11-28T00:30:08,973 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742238_1414 (size=4945) 2024-11-28T00:30:08,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742238_1414 (size=4945) 2024-11-28T00:30:08,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742238_1414 (size=4945) 2024-11-28T00:30:09,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742237_1413 (size=22246) 2024-11-28T00:30:09,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742237_1413 (size=22246) 2024-11-28T00:30:09,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43159 is added to blk_1073742237_1413 (size=22246) 2024-11-28T00:30:09,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742239_1415 (size=483) 2024-11-28T00:30:09,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742239_1415 (size=483) 2024-11-28T00:30:09,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742239_1415 (size=483) 2024-11-28T00:30:09,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742240_1416 (size=22246) 2024-11-28T00:30:09,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742240_1416 (size=22246) 2024-11-28T00:30:09,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742240_1416 (size=22246) 2024-11-28T00:30:09,111 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000003/launch_container.sh] 2024-11-28T00:30:09,111 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000003/container_tokens] 2024-11-28T00:30:09,111 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000003/sysfs] 2024-11-28T00:30:09,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742241_1417 (size=349753) 2024-11-28T00:30:09,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742241_1417 (size=349753) 2024-11-28T00:30:09,114 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742241_1417 (size=349753) 2024-11-28T00:30:09,130 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0007_000001 (auth:SIMPLE) from 127.0.0.1:48106 2024-11-28T00:30:09,230 INFO [regionserver/72c8c7f04009:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:meta,,1.1588230740 because 1588230740/ns has an old edit so flush to free WALs after random delay 134973 ms 2024-11-28T00:30:09,236 DEBUG [master/72c8c7f04009:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-28T00:30:09,237 DEBUG [master/72c8c7f04009:0.Chore.1 {}] 
janitor.CatalogJanitor(258): Cleaning merged region {ENCODED => 9862736ad6a4a00d92d45d3e32dfbb1d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d.', STARTKEY => '', ENDKEY => ''} 2024-11-28T00:30:09,238 DEBUG [master/72c8c7f04009:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:09,238 DEBUG [master/72c8c7f04009:0.Chore.1 {}] janitor.CatalogJanitor(283): Deferring cleanup up of 2 parents of merged region 9862736ad6a4a00d92d45d3e32dfbb1d, because references still exist in merged region or we encountered an exception in checking 2024-11-28T00:30:09,239 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region daf7088603a9f84f2be699938b8bcdb0 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:30:09,239 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region a29845d15726dd0a5b11d9ae28fd4c0c changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:30:09,241 DEBUG [master/72c8c7f04009:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(138): Balancing RSGroup=default 2024-11-28T00:30:09,241 INFO [master/72c8c7f04009:0.Chore.1 {}] rsgroup.RSGroupBasedLoadBalancer(151): Start Generate Balance plan for group: default 2024-11-28T00:30:09,241 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BaseLoadBalancer(619): Start Generate Balance plan for cluster. 2024-11-28T00:30:09,242 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:30:09,243 DEBUG [master/72c8c7f04009:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:09,245 DEBUG [master/72c8c7f04009:0.Chore.1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:09,247 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(303): server 0 has 2 regions 2024-11-28T00:30:09,247 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(303): server 1 has 3 regions 2024-11-28T00:30:09,247 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:30:09,247 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:30:09,247 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:30:09,247 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:30:09,248 INFO [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:30:09,248 INFO [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:30:09,248 INFO [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:30:09,248 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.BalancerClusterState(326): Number of tables=4, number of hosts=1, number of racks=1 2024-11-28T00:30:09,251 INFO [master/72c8c7f04009:0.Chore.1 {}] balancer.StochasticLoadBalancer(370): Running 
balancer because cluster has idle server(s). function cost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.41143782776614785, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.833041948207206, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8648601852843434, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.5773502691896258, need balance); 2024-11-28T00:30:09,251 INFO [master/72c8c7f04009:0.Chore.1 {}] balancer.StochasticLoadBalancer(515): Start StochasticLoadBalancer.balancer, initial weighted average imbalance=0.35765267857739674, functionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.41143782776614785, need balance); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.0); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.833041948207206, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8648601852843434, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.5773502691896258, need balance); computedMaxSteps=12000 2024-11-28T00:30:09,382 INFO [master/72c8c7f04009:0.Chore.1 {}] balancer.StochasticLoadBalancer(562): Finished computing new moving plan. Computation took 133 ms to try 12000 different iterations. Found a solution that moves 1 regions; Going from a computed imbalance of 0.35765267857739674 to a new imbalance of 0.016292439320358728. 
funtionCost=RegionCountSkewCostFunction : (multiplier=500.0, imbalance=0.0); PrimaryRegionCountSkewCostFunction : (not needed); MoveCostFunction : (multiplier=7.0, imbalance=0.2, need balance); ServerLocalityCostFunction : (multiplier=25.0, imbalance=0.0); RackLocalityCostFunction : (multiplier=15.0, imbalance=0.0); TableSkewCostFunction : (multiplier=35.0, imbalance=0.0); RegionReplicaHostCostFunction : (not needed); RegionReplicaRackCostFunction : (not needed); ReadRequestCostFunction : (multiplier=5.0, imbalance=0.833041948207206, need balance); CPRequestCostFunction : (multiplier=5.0, imbalance=0.0); WriteRequestCostFunction : (multiplier=5.0, imbalance=0.8648601852843434, need balance); MemStoreSizeCostFunction : (multiplier=5.0, imbalance=0.0); StoreFileCostFunction : (multiplier=5.0, imbalance=0.0); 2024-11-28T00:30:09,386 INFO [master/72c8c7f04009:0.Chore.1 {}] master.HMaster(2167): Balancer plans size is 1, the balance interval is 300000 ms, and the max number regions in transition is 5 2024-11-28T00:30:09,386 INFO [master/72c8c7f04009:0.Chore.1 {}] master.HMaster(2172): balance hri=a29845d15726dd0a5b11d9ae28fd4c0c, source=72c8c7f04009,41621,1732753506207, destination=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:09,388 DEBUG [master/72c8c7f04009:0.Chore.1 {}] procedure2.ProcedureExecutor(1139): Stored pid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, REOPEN/MOVE 2024-11-28T00:30:09,388 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, REOPEN/MOVE 2024-11-28T00:30:09,390 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=157 updating hbase:meta row=a29845d15726dd0a5b11d9ae28fd4c0c, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:09,392 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=157, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, REOPEN/MOVE because future has completed 2024-11-28T00:30:09,392 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:30:09,392 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=158, ppid=157, state=RUNNABLE, hasLock=false; CloseRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:30:09,545 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] handler.UnassignRegionHandler(122): Close a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:30:09,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] regionserver.HRegion(1722): Closing a29845d15726dd0a5b11d9ae28fd4c0c, disabling compactions & flushes 2024-11-28T00:30:09,545 INFO 
[RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:09,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:09,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. after waiting 0 ms 2024-11-28T00:30:09,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:09,549 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:30:09,549 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:30:09,549 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 
2024-11-28T00:30:09,550 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] regionserver.HRegion(1676): Region close journal for a29845d15726dd0a5b11d9ae28fd4c0c: Waiting for close lock at 1732753809545Running coprocessor pre-close hooks at 1732753809545Disabling compacts and flushes for region at 1732753809545Disabling writes for close at 1732753809545Writing region close event to WAL at 1732753809546 (+1 ms)Running coprocessor post-close hooks at 1732753809549 (+3 ms)Closed at 1732753809549 2024-11-28T00:30:09,550 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] regionserver.HRegionServer(3302): Adding a29845d15726dd0a5b11d9ae28fd4c0c move to 72c8c7f04009,43335,1732753506004 record at close sequenceid=6 2024-11-28T00:30:09,552 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=158}] handler.UnassignRegionHandler(157): Closed a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,553 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=157 updating hbase:meta row=a29845d15726dd0a5b11d9ae28fd4c0c, regionState=CLOSED 2024-11-28T00:30:09,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=158, ppid=157, state=RUNNABLE, hasLock=false; CloseRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:30:09,557 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=158, resume processing ppid=157 2024-11-28T00:30:09,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=158, ppid=157, state=SUCCESS, hasLock=false; CloseRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,41621,1732753506207 in 163 msec 2024-11-28T00:30:09,558 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=157, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, REOPEN/MOVE; state=CLOSED, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:30:09,709 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-28T00:30:09,709 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=157 updating hbase:meta row=a29845d15726dd0a5b11d9ae28fd4c0c, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:09,711 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=157, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, REOPEN/MOVE because future has completed 2024-11-28T00:30:09,711 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=159, ppid=157, state=RUNNABLE, hasLock=false; OpenRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:30:09,867 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 
2024-11-28T00:30:09,867 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7752): Opening region: {ENCODED => a29845d15726dd0a5b11d9ae28fd4c0c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:30:09,869 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. service=AccessControlService 2024-11-28T00:30:09,869 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:30:09,869 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithMergeRegion a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,869 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:09,869 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7794): checking encryption for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,869 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(7797): checking classloading for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,871 INFO [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,872 INFO [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a29845d15726dd0a5b11d9ae28fd4c0c columnFamilyName cf 2024-11-28T00:30:09,873 DEBUG [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:09,895 DEBUG [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] regionserver.StoreEngine(278): loaded 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/cf/a91709a9e898417aa77de1246eb2929d 2024-11-28T00:30:09,895 INFO [StoreOpener-a29845d15726dd0a5b11d9ae28fd4c0c-1 {}] regionserver.HStore(327): Store=a29845d15726dd0a5b11d9ae28fd4c0c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:09,896 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1038): replaying wal for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,896 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,898 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,898 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1048): stopping wal replay for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,898 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1060): Cleaning up temporary data for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,902 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1093): writing seq id for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,902 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1114): Opened a29845d15726dd0a5b11d9ae28fd4c0c; next sequenceid=10; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=67296152, jitterRate=0.00279080867767334}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:09,903 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:09,903 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegion(1006): Region open journal for a29845d15726dd0a5b11d9ae28fd4c0c: Running coprocessor pre-open hook at 1732753809869Writing region info on filesystem at 1732753809870 (+1 ms)Initializing all the Stores at 1732753809871 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753809871Cleaning up temporary data from old regions at 1732753809898 (+27 ms)Running coprocessor post-open hooks at 
1732753809903 (+5 ms)Region opened successfully at 1732753809903 2024-11-28T00:30:09,904 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c., pid=159, masterSystemTime=1732753809863 2024-11-28T00:30:09,906 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:09,907 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=157 updating hbase:meta row=a29845d15726dd0a5b11d9ae28fd4c0c, regionState=OPEN, openSeqNum=10, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:09,907 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=159}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:09,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=159, ppid=157, state=RUNNABLE, hasLock=false; OpenRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:30:09,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=159, resume processing ppid=157 2024-11-28T00:30:09,913 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=159, ppid=157, state=SUCCESS, hasLock=false; OpenRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,43335,1732753506004 in 200 msec 2024-11-28T00:30:09,915 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=157, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, REOPEN/MOVE in 527 msec 2024-11-28T00:30:09,989 DEBUG [master/72c8c7f04009:0.Chore.1 {}] master.HMaster(2203): Balancer is going into sleep until next period in 300000ms 2024-11-28T00:30:09,995 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportFileSystemStateWithMergeRegion because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-28T00:30:09,995 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(219): Skipping table testtb-testExportFileSystemStateWithMergeRegion-1 because normalization is disabled in its table properties and normalization is also disabled at table level by default 2024-11-28T00:30:10,914 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:30:10,915 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
2024-11-28T00:30:10,921 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:10,922 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:30:10,922 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:30:10,922 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:10,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-28T00:30:10,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-28T00:30:10,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:10,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/.snapshotinfo 2024-11-28T00:30:10,923 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753789381/.hbase-snapshot/snaptb0-testExportFileSystemStateWithMergeRegion-1/data.manifest 2024-11-28T00:30:10,929 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:10,930 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=160, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:10,932 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=160 2024-11-28T00:30:10,933 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753810933"}]},"ts":"1732753810933"} 2024-11-28T00:30:10,934 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated 
tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLING in hbase:meta 2024-11-28T00:30:10,935 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLING 2024-11-28T00:30:10,935 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=161, ppid=160, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1}] 2024-11-28T00:30:10,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, UNASSIGN}] 2024-11-28T00:30:10,938 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, UNASSIGN 2024-11-28T00:30:10,938 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=162 updating hbase:meta row=9862736ad6a4a00d92d45d3e32dfbb1d, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:10,940 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=162, ppid=161, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, UNASSIGN because future has completed 2024-11-28T00:30:10,940 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:30:10,940 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=163, ppid=162, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:30:11,041 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=160 2024-11-28T00:30:11,092 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(122): Close 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:30:11,092 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:30:11,092 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1722): Closing 9862736ad6a4a00d92d45d3e32dfbb1d, disabling compactions & flushes 2024-11-28T00:30:11,092 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 2024-11-28T00:30:11,092 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 
2024-11-28T00:30:11,092 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. after waiting 0 ms 2024-11-28T00:30:11,092 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 2024-11-28T00:30:11,097 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=8 2024-11-28T00:30:11,097 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:30:11,097 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d. 2024-11-28T00:30:11,097 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] regionserver.HRegion(1676): Region close journal for 9862736ad6a4a00d92d45d3e32dfbb1d: Waiting for close lock at 1732753811092Running coprocessor pre-close hooks at 1732753811092Disabling compacts and flushes for region at 1732753811092Disabling writes for close at 1732753811092Writing region close event to WAL at 1732753811093 (+1 ms)Running coprocessor post-close hooks at 1732753811097 (+4 ms)Closed at 1732753811097 2024-11-28T00:30:11,099 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=163}] handler.UnassignRegionHandler(157): Closed 9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:30:11,100 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=162 updating hbase:meta row=9862736ad6a4a00d92d45d3e32dfbb1d, regionState=CLOSED 2024-11-28T00:30:11,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=163, ppid=162, state=RUNNABLE, hasLock=false; CloseRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:30:11,103 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=163, resume processing ppid=162 2024-11-28T00:30:11,104 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=163, ppid=162, state=SUCCESS, hasLock=false; CloseRegionProcedure 9862736ad6a4a00d92d45d3e32dfbb1d, server=72c8c7f04009,41621,1732753506207 in 162 msec 2024-11-28T00:30:11,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=162, resume processing ppid=161 2024-11-28T00:30:11,105 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=162, ppid=161, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1, region=9862736ad6a4a00d92d45d3e32dfbb1d, UNASSIGN in 167 msec 2024-11-28T00:30:11,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure 
pid=161, resume processing ppid=160 2024-11-28T00:30:11,107 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=161, ppid=160, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 171 msec 2024-11-28T00:30:11,109 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753811108"}]},"ts":"1732753811108"} 2024-11-28T00:30:11,110 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion-1, state=DISABLED in hbase:meta 2024-11-28T00:30:11,110 INFO [PEWorker-4 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion-1 to state=DISABLED 2024-11-28T00:30:11,112 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=160, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 181 msec 2024-11-28T00:30:11,251 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=160 2024-11-28T00:30:11,251 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:30:11,252 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,253 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,254 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=164, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,255 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=164, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,257 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,258 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:30:11,258 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:30:11,258 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:30:11,260 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/recovered.edits] 2024-11-28T00:30:11,260 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/recovered.edits] 2024-11-28T00:30:11,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,260 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,261 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/recovered.edits] 2024-11-28T00:30:11,261 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-28T00:30:11,262 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-28T00:30:11,262 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-28T00:30:11,262 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion-1 with data PBUF 2024-11-28T00:30:11,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:11,264 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:11,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:11,266 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:11,268 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/413b5603977f484196d05f3c114a760a.a0fc42ec4643c853998d79771fbf60cc to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/413b5603977f484196d05f3c114a760a.a0fc42ec4643c853998d79771fbf60cc 2024-11-28T00:30:11,268 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/cf/413b5603977f484196d05f3c114a760a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/cf/413b5603977f484196d05f3c114a760a 2024-11-28T00:30:11,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, 
type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:11,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,268 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:11,268 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/cf/a65e7e311a724ca4b478e52cb39d787f to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/cf/a65e7e311a724ca4b478e52cb39d787f 2024-11-28T00:30:11,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=164 2024-11-28T00:30:11,270 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/a65e7e311a724ca4b478e52cb39d787f.fee8516c778b886537c19942d5a2d0cb to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/cf/a65e7e311a724ca4b478e52cb39d787f.fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:30:11,272 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/recovered.edits/8.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc/recovered.edits/8.seqid 2024-11-28T00:30:11,272 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/recovered.edits/8.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb/recovered.edits/8.seqid 2024-11-28T00:30:11,273 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/fee8516c778b886537c19942d5a2d0cb 2024-11-28T00:30:11,273 DEBUG [HFileArchiver-17 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/a0fc42ec4643c853998d79771fbf60cc 
2024-11-28T00:30:11,274 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/recovered.edits/12.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d/recovered.edits/12.seqid 2024-11-28T00:30:11,275 DEBUG [HFileArchiver-16 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion-1/9862736ad6a4a00d92d45d3e32dfbb1d 2024-11-28T00:30:11,275 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion-1 regions 2024-11-28T00:30:11,275 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:11,275 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF\x0AU\x0A\x07jenkins\x12J\x08\x03"F\x0A:\x0A\x07default\x12/testtb-testExportFileSystemStateWithMergeRegion \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:11,277 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=164, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,280 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 1 rows of testtb-testExportFileSystemStateWithMergeRegion-1 from hbase:meta 2024-11-28T00:30:11,282 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' descriptor. 2024-11-28T00:30:11,283 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=164, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,283 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion-1' from region states. 2024-11-28T00:30:11,283 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753811283"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:11,285 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 1 regions from META 2024-11-28T00:30:11,285 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 9862736ad6a4a00d92d45d3e32dfbb1d, NAME => 'testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d.', STARTKEY => '', ENDKEY => ''}] 2024-11-28T00:30:11,285 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion-1' as deleted. 
2024-11-28T00:30:11,285 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion-1","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753811285"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:11,287 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion-1 state from META 2024-11-28T00:30:11,287 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=164, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,289 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=164, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion-1 in 35 msec 2024-11-28T00:30:11,381 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=164 2024-11-28T00:30:11,382 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,382 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion-1 completed 2024-11-28T00:30:11,382 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,383 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=165, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,385 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-28T00:30:11,386 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753811385"}]},"ts":"1732753811385"} 2024-11-28T00:30:11,387 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLING in hbase:meta 2024-11-28T00:30:11,387 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLING 2024-11-28T00:30:11,388 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=166, ppid=165, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion}] 2024-11-28T00:30:11,389 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, UNASSIGN}, {pid=168, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, UNASSIGN}] 2024-11-28T00:30:11,390 INFO [PEWorker-3 {}] 
procedure.MasterProcedureScheduler(851): Took xlock for pid=168, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, UNASSIGN 2024-11-28T00:30:11,390 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, UNASSIGN 2024-11-28T00:30:11,391 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=168 updating hbase:meta row=a29845d15726dd0a5b11d9ae28fd4c0c, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:11,391 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=167 updating hbase:meta row=daf7088603a9f84f2be699938b8bcdb0, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:30:11,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=167, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, UNASSIGN because future has completed 2024-11-28T00:30:11,393 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:30:11,393 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=169, ppid=167, state=RUNNABLE, hasLock=false; CloseRegionProcedure daf7088603a9f84f2be699938b8bcdb0, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:30:11,393 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=168, ppid=166, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, UNASSIGN because future has completed 2024-11-28T00:30:11,394 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:30:11,394 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=170, ppid=168, state=RUNNABLE, hasLock=false; CloseRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:30:11,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-28T00:30:11,545 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(122): Close daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:30:11,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:30:11,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1722): Closing daf7088603a9f84f2be699938b8bcdb0, disabling compactions & flushes 2024-11-28T00:30:11,545 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1755): 
Closing region testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:30:11,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:30:11,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. after waiting 0 ms 2024-11-28T00:30:11,545 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 2024-11-28T00:30:11,547 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(122): Close a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:11,547 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:30:11,547 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1722): Closing a29845d15726dd0a5b11d9ae28fd4c0c, disabling compactions & flushes 2024-11-28T00:30:11,547 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:11,547 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:11,547 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. after waiting 0 ms 2024-11-28T00:30:11,548 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:11,550 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:30:11,551 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:30:11,551 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0. 
2024-11-28T00:30:11,551 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] regionserver.HRegion(1676): Region close journal for daf7088603a9f84f2be699938b8bcdb0: Waiting for close lock at 1732753811545Running coprocessor pre-close hooks at 1732753811545Disabling compacts and flushes for region at 1732753811545Disabling writes for close at 1732753811545Writing region close event to WAL at 1732753811546 (+1 ms)Running coprocessor post-close hooks at 1732753811550 (+4 ms)Closed at 1732753811551 (+1 ms) 2024-11-28T00:30:11,551 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/recovered.edits/12.seqid, newMaxSeqId=12, maxSeqId=9 2024-11-28T00:30:11,551 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:30:11,552 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c. 2024-11-28T00:30:11,552 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] regionserver.HRegion(1676): Region close journal for a29845d15726dd0a5b11d9ae28fd4c0c: Waiting for close lock at 1732753811547Running coprocessor pre-close hooks at 1732753811547Disabling compacts and flushes for region at 1732753811547Disabling writes for close at 1732753811548 (+1 ms)Writing region close event to WAL at 1732753811548Running coprocessor post-close hooks at 1732753811551 (+3 ms)Closed at 1732753811552 (+1 ms) 2024-11-28T00:30:11,552 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=169}] handler.UnassignRegionHandler(157): Closed daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:30:11,553 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=167 updating hbase:meta row=daf7088603a9f84f2be699938b8bcdb0, regionState=CLOSED 2024-11-28T00:30:11,553 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=170}] handler.UnassignRegionHandler(157): Closed a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:11,554 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=168 updating hbase:meta row=a29845d15726dd0a5b11d9ae28fd4c0c, regionState=CLOSED 2024-11-28T00:30:11,555 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=169, ppid=167, state=RUNNABLE, hasLock=false; CloseRegionProcedure daf7088603a9f84f2be699938b8bcdb0, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:30:11,556 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=170, ppid=168, state=RUNNABLE, hasLock=false; CloseRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:30:11,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=169, resume processing ppid=167 2024-11-28T00:30:11,557 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=169, 
ppid=167, state=SUCCESS, hasLock=false; CloseRegionProcedure daf7088603a9f84f2be699938b8bcdb0, server=72c8c7f04009,36427,1732753506332 in 163 msec 2024-11-28T00:30:11,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=170, resume processing ppid=168 2024-11-28T00:30:11,558 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=167, ppid=166, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=daf7088603a9f84f2be699938b8bcdb0, UNASSIGN in 168 msec 2024-11-28T00:30:11,558 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=170, ppid=168, state=SUCCESS, hasLock=false; CloseRegionProcedure a29845d15726dd0a5b11d9ae28fd4c0c, server=72c8c7f04009,43335,1732753506004 in 163 msec 2024-11-28T00:30:11,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=168, resume processing ppid=166 2024-11-28T00:30:11,560 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=168, ppid=166, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithMergeRegion, region=a29845d15726dd0a5b11d9ae28fd4c0c, UNASSIGN in 169 msec 2024-11-28T00:30:11,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=166, resume processing ppid=165 2024-11-28T00:30:11,562 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=166, ppid=165, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 172 msec 2024-11-28T00:30:11,563 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753811563"}]},"ts":"1732753811563"} 2024-11-28T00:30:11,564 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithMergeRegion, state=DISABLED in hbase:meta 2024-11-28T00:30:11,565 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithMergeRegion to state=DISABLED 2024-11-28T00:30:11,567 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=165, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 183 msec 2024-11-28T00:30:11,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=165 2024-11-28T00:30:11,701 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:30:11,701 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,702 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,703 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=171, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,703 
DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,704 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=171, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,706 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,707 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:30:11,708 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:11,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,709 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,710 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/recovered.edits] 2024-11-28T00:30:11,710 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-28T00:30:11,710 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-28T00:30:11,710 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-28T00:30:11,710 DEBUG [zk-permission-watcher-pool-0 {}] 
access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithMergeRegion with data PBUF 2024-11-28T00:30:11,710 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/recovered.edits] 2024-11-28T00:30:11,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:11,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:11,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:11,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:11,713 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=171 2024-11-28T00:30:11,715 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/cf/a91709a9e898417aa77de1246eb2929d to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/cf/a91709a9e898417aa77de1246eb2929d 
2024-11-28T00:30:11,715 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/cf/a581c0998e50422583e8b5bccb7aaf0d to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/cf/a581c0998e50422583e8b5bccb7aaf0d 2024-11-28T00:30:11,718 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/recovered.edits/12.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c/recovered.edits/12.seqid 2024-11-28T00:30:11,718 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0/recovered.edits/9.seqid 2024-11-28T00:30:11,719 DEBUG [HFileArchiver-20 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:11,719 DEBUG [HFileArchiver-19 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithMergeRegion/daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:30:11,719 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithMergeRegion regions 2024-11-28T00:30:11,719 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-28T00:30:11,720 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf] 2024-11-28T00:30:11,723 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241128c12def9147044b08905f5f3930cb676e_a29845d15726dd0a5b11d9ae28fd4c0c to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/c4ca4238a0b923820dcc509a6f75849b20241128c12def9147044b08905f5f3930cb676e_a29845d15726dd0a5b11d9ae28fd4c0c 2024-11-28T00:30:11,724 DEBUG [PEWorker-3 {}] 
backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411283688418a3a9b4e07918f70bbb28d5441_daf7088603a9f84f2be699938b8bcdb0 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c/cf/d41d8cd98f00b204e9800998ecf8427e202411283688418a3a9b4e07918f70bbb28d5441_daf7088603a9f84f2be699938b8bcdb0 2024-11-28T00:30:11,725 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithMergeRegion/aaca68e56e63a701f5c1ec9dc2f0511c 2024-11-28T00:30:11,727 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=171, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,729 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithMergeRegion from hbase:meta 2024-11-28T00:30:11,731 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithMergeRegion' descriptor. 2024-11-28T00:30:11,732 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=171, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,732 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithMergeRegion' from region states. 2024-11-28T00:30:11,732 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753811732"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:11,732 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753811732"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:11,734 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:30:11,734 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => daf7088603a9f84f2be699938b8bcdb0, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,,1732753785007.daf7088603a9f84f2be699938b8bcdb0.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => a29845d15726dd0a5b11d9ae28fd4c0c, NAME => 'testtb-testExportFileSystemStateWithMergeRegion,1,1732753785007.a29845d15726dd0a5b11d9ae28fd4c0c.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:30:11,734 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithMergeRegion' as deleted. 
2024-11-28T00:30:11,734 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithMergeRegion","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753811734"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:11,736 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithMergeRegion state from META 2024-11-28T00:30:11,736 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=171, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,737 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=171, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithMergeRegion in 35 msec 2024-11-28T00:30:11,821 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=171 2024-11-28T00:30:11,821 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,821 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithMergeRegion completed 2024-11-28T00:30:11,828 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-28T00:30:11,830 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,831 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion" type: DISABLED 2024-11-28T00:30:11,832 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:11,833 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithMergeRegion-1" type: DISABLED 2024-11-28T00:30:11,835 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:11,855 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithMergeRegion Thread=809 (was 800) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44741 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-16 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:57980 [Waiting for operation #10] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_627235562_1 at /127.0.0.1:52398 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 7949) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:44741 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HFileArchiver-18 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #12 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-6-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #11 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-6297 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) 
java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:45538 [Waiting for operation #6] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-6-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:62) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:883) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_627235562_1 at /127.0.0.1:45582 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:52334 [Waiting for operation #9] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-17 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=801 (was 782) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=952 (was 1064), ProcessCount=14 (was 15), AvailableMemoryMB=3241 (was 3277) 2024-11-28T00:30:11,855 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-11-28T00:30:11,872 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=809, OpenFileDescriptor=801, MaxFileDescriptor=1048576, SystemLoadAverage=952, ProcessCount=14, AvailableMemoryMB=3241 2024-11-28T00:30:11,872 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=809 is superior to 500 2024-11-28T00:30:11,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:30:11,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=172, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:30:11,888 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:30:11,888 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportExpiredSnapshot" procId is: 172 2024-11-28T00:30:11,889 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:30:11,889 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=172 2024-11-28T00:30:11,895 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742242_1418 (size=443) 2024-11-28T00:30:11,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742242_1418 (size=443) 2024-11-28T00:30:11,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742242_1418 (size=443) 2024-11-28T00:30:11,898 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => d6313816891ca7e96135d8701f76acf8, NAME => 'testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => 
'1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:11,898 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 97b1dc36b5143606cce7037de37fce7b, NAME => 'testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:11,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742244_1420 (size=68) 2024-11-28T00:30:11,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742243_1419 (size=68) 2024-11-28T00:30:11,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742244_1420 (size=68) 2024-11-28T00:30:11,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742243_1419 (size=68) 2024-11-28T00:30:11,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742244_1420 (size=68) 2024-11-28T00:30:11,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742243_1419 (size=68) 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing d6313816891ca7e96135d8701f76acf8, disabling compactions & flushes 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 97b1dc36b5143606cce7037de37fce7b, disabling compactions & flushes 2024-11-28T00:30:11,905 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region 
testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:11,905 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. after waiting 0 ms 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. after waiting 0 ms 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:11,905 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:11,905 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:11,905 INFO [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 
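[editor's sketch] The create request above carries the full table descriptor (a single 'cf' family with IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and the RegionOpenAndInit workers lay out the two regions split at row key '1' during CREATE_TABLE_WRITE_FS_LAYOUT. A minimal sketch of how a client could request an equivalent table, assuming a standard HBase 2.x/3.x client on the classpath and a Configuration pointing at this mini-cluster; the class and variable names are illustrative only, not taken from the test:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableDescriptor td = TableDescriptorBuilder
              .newBuilder(TableName.valueOf("testtb-testExportExpiredSnapshot"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMaxVersions(1)      // VERSIONS => '1'
                  .setMobEnabled(true)    // IS_MOB => 'true'
                  .setMobThreshold(0L)    // MOB_THRESHOLD => '0': values above 0 bytes go to MOB files
                  .build())
              .build();
          // One split key yields the two regions seen above: ('', '1') and ('1', '').
          admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
        }
      }
    }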
2024-11-28T00:30:11,906 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for d6313816891ca7e96135d8701f76acf8: Waiting for close lock at 1732753811905Disabling compacts and flushes for region at 1732753811905Disabling writes for close at 1732753811905Writing region close event to WAL at 1732753811905Closed at 1732753811905 2024-11-28T00:30:11,906 DEBUG [RegionOpenAndInit-testtb-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 97b1dc36b5143606cce7037de37fce7b: Waiting for close lock at 1732753811905Disabling compacts and flushes for region at 1732753811905Disabling writes for close at 1732753811905Writing region close event to WAL at 1732753811905Closed at 1732753811905 2024-11-28T00:30:11,907 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:30:11,907 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732753811907"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753811907"}]},"ts":"1732753811907"} 2024-11-28T00:30:11,907 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.","families":{"info":[{"qualifier":"regioninfo","vlen":67,"tag":[],"timestamp":"1732753811907"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753811907"}]},"ts":"1732753811907"} 2024-11-28T00:30:11,910 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
2024-11-28T00:30:11,910 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:30:11,910 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753811910"}]},"ts":"1732753811910"} 2024-11-28T00:30:11,912 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-28T00:30:11,912 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:30:11,913 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:30:11,913 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:30:11,913 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:30:11,913 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:30:11,913 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:30:11,913 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:30:11,913 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:30:11,913 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:30:11,913 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:30:11,913 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:30:11,914 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, ASSIGN}, {pid=174, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, ASSIGN}] 2024-11-28T00:30:11,915 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=174, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, ASSIGN 2024-11-28T00:30:11,915 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, ASSIGN 2024-11-28T00:30:11,916 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=174, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:30:11,916 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=173, ppid=172, 
state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:30:12,001 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=172 2024-11-28T00:30:12,066 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-28T00:30:12,067 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=174 updating hbase:meta row=97b1dc36b5143606cce7037de37fce7b, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:30:12,067 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=173 updating hbase:meta row=d6313816891ca7e96135d8701f76acf8, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:12,068 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=174, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, ASSIGN because future has completed 2024-11-28T00:30:12,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=175, ppid=174, state=RUNNABLE, hasLock=false; OpenRegionProcedure 97b1dc36b5143606cce7037de37fce7b, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:30:12,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=173, ppid=172, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, ASSIGN because future has completed 2024-11-28T00:30:12,070 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=176, ppid=173, state=RUNNABLE, hasLock=false; OpenRegionProcedure d6313816891ca7e96135d8701f76acf8, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:30:12,210 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=172 2024-11-28T00:30:12,224 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:12,224 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(7752): Opening region: {ENCODED => 97b1dc36b5143606cce7037de37fce7b, NAME => 'testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:30:12,224 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] handler.AssignRegionHandler(132): Open testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 
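[editor's sketch] At this point the balancer has placed one region on 72c8c7f04009,41621 and the other on 72c8c7f04009,36427, and OpenRegionProcedure pid=175/176 have been dispatched to those servers. A small client-side sketch for checking where a table's regions ended up, assuming an open Connection named conn as in the previous sketch (the helper name is illustrative):

    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.RegionLocator;

    static void printRegionLocations(Connection conn) throws java.io.IOException {
      try (RegionLocator locator =
               conn.getRegionLocator(TableName.valueOf("testtb-testExportExpiredSnapshot"))) {
        for (HRegionLocation loc : locator.getAllRegionLocations()) {
          // Encoded region name plus its hosting server,
          // e.g. "d6313816891ca7e96135d8701f76acf8 -> 72c8c7f04009,41621,..."
          System.out.println(loc.getRegion().getEncodedName() + " -> " + loc.getServerName());
        }
      }
    }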
2024-11-28T00:30:12,224 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(7752): Opening region: {ENCODED => d6313816891ca7e96135d8701f76acf8, NAME => 'testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:30:12,224 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. service=AccessControlService 2024-11-28T00:30:12,224 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. service=AccessControlService 2024-11-28T00:30:12,225 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:30:12,225 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:30:12,225 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,225 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportExpiredSnapshot 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,225 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:12,225 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(898): Instantiated testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:12,225 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(7794): checking encryption for d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,225 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(7794): checking encryption for 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,225 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(7797): checking classloading for d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,225 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(7797): checking classloading for 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,226 INFO 
[StoreOpener-97b1dc36b5143606cce7037de37fce7b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,226 INFO [StoreOpener-d6313816891ca7e96135d8701f76acf8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,228 INFO [StoreOpener-d6313816891ca7e96135d8701f76acf8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region d6313816891ca7e96135d8701f76acf8 columnFamilyName cf 2024-11-28T00:30:12,228 INFO [StoreOpener-97b1dc36b5143606cce7037de37fce7b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 97b1dc36b5143606cce7037de37fce7b columnFamilyName cf 2024-11-28T00:30:12,229 DEBUG [StoreOpener-97b1dc36b5143606cce7037de37fce7b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:12,229 DEBUG [StoreOpener-d6313816891ca7e96135d8701f76acf8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:12,229 INFO [StoreOpener-d6313816891ca7e96135d8701f76acf8-1 {}] regionserver.HStore(327): Store=d6313816891ca7e96135d8701f76acf8/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:12,229 INFO [StoreOpener-97b1dc36b5143606cce7037de37fce7b-1 {}] regionserver.HStore(327): Store=97b1dc36b5143606cce7037de37fce7b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:12,230 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1038): replaying wal for d6313816891ca7e96135d8701f76acf8 
2024-11-28T00:30:12,230 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1038): replaying wal for 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,230 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,230 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,231 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,231 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,231 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1048): stopping wal replay for d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,231 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1060): Cleaning up temporary data for d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,231 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1048): stopping wal replay for 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,231 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1060): Cleaning up temporary data for 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,232 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1093): writing seq id for d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,233 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1093): writing seq id for 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,234 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:30:12,235 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1114): Opened d6313816891ca7e96135d8701f76acf8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60046464, jitterRate=-0.10523796081542969}}}, 
FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:12,235 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:30:12,235 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1122): Running coprocessor post-open hooks for d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,235 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1114): Opened 97b1dc36b5143606cce7037de37fce7b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=68947571, jitterRate=0.027398869395256042}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:12,235 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,235 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegion(1006): Region open journal for d6313816891ca7e96135d8701f76acf8: Running coprocessor pre-open hook at 1732753812225Writing region info on filesystem at 1732753812225Initializing all the Stores at 1732753812226 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753812226Cleaning up temporary data from old regions at 1732753812231 (+5 ms)Running coprocessor post-open hooks at 1732753812235 (+4 ms)Region opened successfully at 1732753812235 2024-11-28T00:30:12,236 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegion(1006): Region open journal for 97b1dc36b5143606cce7037de37fce7b: Running coprocessor pre-open hook at 1732753812225Writing region info on filesystem at 1732753812225Initializing all the Stores at 1732753812226 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753812226Cleaning up temporary data from old regions at 1732753812231 (+5 ms)Running coprocessor post-open hooks at 1732753812235 (+4 ms)Region opened successfully at 1732753812235 2024-11-28T00:30:12,236 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8., pid=176, masterSystemTime=1732753812222 2024-11-28T00:30:12,236 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] 
regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b., pid=175, masterSystemTime=1732753812220 2024-11-28T00:30:12,238 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:12,238 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=175}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:12,239 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=174 updating hbase:meta row=97b1dc36b5143606cce7037de37fce7b, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:30:12,239 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:12,239 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=176}] handler.AssignRegionHandler(153): Opened testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:12,240 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=173 updating hbase:meta row=d6313816891ca7e96135d8701f76acf8, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:12,241 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=175, ppid=174, state=RUNNABLE, hasLock=false; OpenRegionProcedure 97b1dc36b5143606cce7037de37fce7b, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:30:12,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=176, ppid=173, state=RUNNABLE, hasLock=false; OpenRegionProcedure d6313816891ca7e96135d8701f76acf8, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:30:12,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=175, resume processing ppid=174 2024-11-28T00:30:12,243 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=175, ppid=174, state=SUCCESS, hasLock=false; OpenRegionProcedure 97b1dc36b5143606cce7037de37fce7b, server=72c8c7f04009,36427,1732753506332 in 173 msec 2024-11-28T00:30:12,244 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=176, resume processing ppid=173 2024-11-28T00:30:12,244 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=176, ppid=173, state=SUCCESS, hasLock=false; OpenRegionProcedure d6313816891ca7e96135d8701f76acf8, server=72c8c7f04009,41621,1732753506207 in 173 msec 2024-11-28T00:30:12,245 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=174, ppid=172, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, ASSIGN in 329 msec 2024-11-28T00:30:12,246 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=173, resume processing ppid=172 2024-11-28T00:30:12,246 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished 
pid=173, ppid=172, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, ASSIGN in 330 msec 2024-11-28T00:30:12,247 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:30:12,247 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753812247"}]},"ts":"1732753812247"} 2024-11-28T00:30:12,248 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-28T00:30:12,249 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=172, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:30:12,249 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportExpiredSnapshot jenkins: RWXCA 2024-11-28T00:30:12,253 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:30:12,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:12,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:12,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:12,255 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:12,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:12,257 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:12,258 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:12,258 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data 
PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:12,259 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=172, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportExpiredSnapshot in 371 msec 2024-11-28T00:30:12,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=172 2024-11-28T00:30:12,521 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:30:12,521 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:12,524 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-28T00:30:12,524 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:12,524 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:30:12,526 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:12,531 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:12,535 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:12,537 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-28T00:30:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753812537 (current time:1732753812537). 
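[editor's sketch] The snapshot request logged above ({ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }) is the shape the Admin API produces for a flush-type snapshot. A minimal client-side sketch, assuming an Admin handle as in the first sketch; the ttl is simply left at the default the log reports (0):

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.SnapshotType;

    static void takeEmptySnapshot(Admin admin) throws Exception {
      // Issues a FLUSH-type snapshot request like the one handled by MasterRpcServices above.
      admin.snapshot("emptySnaptb0-testExportExpiredSnapshot",
          TableName.valueOf("testtb-testExportExpiredSnapshot"),
          SnapshotType.FLUSH);
    }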
2024-11-28T00:30:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:30:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-28T00:30:12,537 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:30:12,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d1f301, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:12,538 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:12,539 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:12,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:12,539 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:12,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e93b7b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:12,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:12,540 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,541 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40716, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:12,542 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22da2bc2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,542 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:12,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:12,543 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:12,544 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57520, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:12,545 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:30:12,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:12,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,545 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,545 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
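[editor's sketch] The call stack above ends in PermissionStorage.getPermissions: before accepting the snapshot, the master reads the table's ACL entry (jenkins: RWXCA, i.e. READ/WRITE/EXEC/CREATE/ADMIN) so it can be carried in the snapshot description. For reference, a hedged sketch of how such a table permission could be granted and read back from a client, assuming the AccessController coprocessor is enabled as it is in this secure test:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;
    import org.apache.hadoop.hbase.security.access.UserPermission;

    static void grantAndList(Connection conn) throws Throwable {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      // "RWXCA" in the log corresponds to these five actions; null family/qualifier = whole table.
      AccessControlClient.grant(conn, table, "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
      for (UserPermission up : AccessControlClient.getUserPermissions(conn, table.getNameAsString())) {
        System.out.println(up);
      }
    }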
2024-11-28T00:30:12,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54894453, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:12,546 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:12,547 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:12,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:12,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:12,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@a3e5d4c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:12,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:12,547 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,548 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40722, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:12,548 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3f509e2b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,549 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:12,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:12,550 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:12,551 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57526, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
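[editor's sketch] The ClusterIdFetcher lines above come from the short-lived connection the master opens back to itself while validating the snapshot request; the registry hands back the cluster id before the meta region location is fetched. From an ordinary client the same identifier is available through the Admin API; a small sketch, assuming the Admin handle used earlier:

    import org.apache.hadoop.hbase.client.Admin;

    static void printClusterId(Admin admin) throws java.io.IOException {
      // Should print the id the registry returned above,
      // e.g. 0971976f-020a-48db-8700-e23062cd273e for this mini-cluster run.
      System.out.println(admin.getClusterMetrics().getClusterId());
    }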
2024-11-28T00:30:12,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:30:12,553 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:12,554 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48146, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:12,555 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:30:12,555 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:12,555 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:30:12,556 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:30:12,557 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:30:12,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-28T00:30:12,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-28T00:30:12,558 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-28T00:30:12,559 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:30:12,560 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:30:12,563 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:30:12,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742245_1421 (size=170) 2024-11-28T00:30:12,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742245_1421 (size=170) 2024-11-28T00:30:12,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742245_1421 (size=170) 2024-11-28T00:30:12,584 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 
2024-11-28T00:30:12,584 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6313816891ca7e96135d8701f76acf8}, {pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 97b1dc36b5143606cce7037de37fce7b}] 2024-11-28T00:30:12,585 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,586 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-28T00:30:12,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=178 2024-11-28T00:30:12,738 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=179 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.HRegion(2603): Flush status journal for d6313816891ca7e96135d8701f76acf8: 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.HRegion(2603): Flush status journal for 97b1dc36b5143606cce7037de37fce7b: 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. for emptySnaptb0-testExportExpiredSnapshot completed. 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.' 
region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.' region-info for snapshot=emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:12,738 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:12,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:30:12,739 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:30:12,744 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742246_1422 (size=71) 2024-11-28T00:30:12,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742246_1422 (size=71) 2024-11-28T00:30:12,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742247_1423 (size=71) 2024-11-28T00:30:12,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742246_1422 (size=71) 2024-11-28T00:30:12,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742247_1423 (size=71) 2024-11-28T00:30:12,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:12,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=178}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=178 2024-11-28T00:30:12,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742247_1423 (size=71) 2024-11-28T00:30:12,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 
2024-11-28T00:30:12,746 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=179}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=179 2024-11-28T00:30:12,746 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=178 2024-11-28T00:30:12,746 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,747 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=178, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:12,747 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=179 2024-11-28T00:30:12,747 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportExpiredSnapshot on region 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,747 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=179, ppid=177, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,749 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=178, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d6313816891ca7e96135d8701f76acf8 in 164 msec 2024-11-28T00:30:12,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=179, resume processing ppid=177 2024-11-28T00:30:12,750 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=179, ppid=177, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 97b1dc36b5143606cce7037de37fce7b in 164 msec 2024-11-28T00:30:12,750 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:30:12,751 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:30:12,751 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:30:12,751 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:30:12,752 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:12,752 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:30:12,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742248_1424 (size=63) 2024-11-28T00:30:12,758 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742248_1424 (size=63) 2024-11-28T00:30:12,759 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742248_1424 (size=63) 2024-11-28T00:30:12,760 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:30:12,760 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:30:12,760 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:30:12,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742249_1425 (size=653) 2024-11-28T00:30:12,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742249_1425 (size=653) 2024-11-28T00:30:12,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742249_1425 (size=653) 2024-11-28T00:30:12,769 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:30:12,773 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:30:12,774 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportExpiredSnapshot to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:30:12,775 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=177, 
state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:30:12,775 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 177 2024-11-28T00:30:12,776 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=177, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=177, snapshot={ ss=emptySnaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 218 msec 2024-11-28T00:30:12,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=177 2024-11-28T00:30:12,871 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:30:12,877 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41621 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:30:12,879 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegion(8528): writing data to region testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:30:12,881 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:12,884 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportExpiredSnapshot 2024-11-28T00:30:12,884 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 
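The entries above show the master finishing the FLUSH snapshot emptySnaptb0-testExportExpiredSnapshot (pid=177, roughly 218 msec) and the test then writing a handful of rows with the WAL disabled before requesting snaptb0-testExportExpiredSnapshot below. The client call that drives such a request is the public Admin snapshot API; what follows is only a minimal sketch, assuming a cluster reachable through the hbase-site.xml on the classpath, with the table and snapshot names taken from the log and everything else illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class TakeFlushSnapshot {
      public static void main(String[] args) throws Exception {
        // Cluster location is read from hbase-site.xml on the classpath (assumption).
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
          // Default snapshot type is FLUSH: regions are flushed first, then their
          // store files are referenced, matching the SNAPSHOT_PREPARE ..
          // SNAPSHOT_COMPLETE_SNAPSHOT states logged by the SnapshotProcedure.
          admin.snapshot("snaptb0-testExportExpiredSnapshot", table);
        }
      }
    }

The call blocks until the master-side SnapshotProcedure finishes, which is what the repeated "Checking to see if procedure is done pid=..." entries in the log correspond to.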
2024-11-28T00:30:12,884 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:30:12,886 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:12,890 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:12,895 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportExpiredSnapshot,, stopping at row=testtb-testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:12,897 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-28T00:30:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753812897 (current time:1732753812897). 2024-11-28T00:30:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:30:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-28T00:30:12,897 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:30:12,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4f7e6072, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:12,898 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:12,899 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:12,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:12,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:12,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7cab0945, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, 
bind address=null 2024-11-28T00:30:12,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:12,899 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:12,900 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,900 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47482, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:12,901 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@372a5a4d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,901 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:12,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:12,902 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:12,903 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39110, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:12,904 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:30:12,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:12,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,904 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,904 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:30:12,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3ff3c84d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:12,905 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:12,906 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:12,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:12,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:12,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11bdb253, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:12,906 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:12,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,907 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47506, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:12,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d1fd4c0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:12,908 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:12,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:12,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:12,910 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39116, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:12,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:30:12,912 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:12,912 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:12,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 
2024-11-28T00:30:12,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:12,914 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:30:12,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:30:12,915 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
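The "Read acl: entry[testtb-testExportExpiredSnapshot], kv [jenkins: RWXCA]" entry above is the master copying the table's ACL into the snapshot description (writeAclToSnapshotDescription in the stack trace) before starting the snapshot. The grant itself was issued earlier in the test and is not part of this excerpt; purely to illustrate where such an ACL entry comes from, a grant of that shape can be made with AccessControlClient, assuming the AccessController coprocessor is enabled (as it evidently is here, given the hbase:acl table):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.security.access.AccessControlClient;
    import org.apache.hadoop.hbase.security.access.Permission;

    public class GrantTablePermissions {
      public static void main(String[] args) throws Throwable {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf)) {
          // Grant Read/Write/Exec/Create/Admin (RWXCA, as shown in the log) on the
          // whole table to user "jenkins"; null family/qualifier means all columns.
          AccessControlClient.grant(connection,
              TableName.valueOf("testtb-testExportExpiredSnapshot"),
              "jenkins", null, null,
              Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
              Permission.Action.CREATE, Permission.Action.ADMIN);
        }
      }
    }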
2024-11-28T00:30:12,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } 2024-11-28T00:30:12,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-28T00:30:12,917 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:30:12,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-28T00:30:12,918 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:30:12,920 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:30:12,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742250_1426 (size=165) 2024-11-28T00:30:12,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742250_1426 (size=165) 2024-11-28T00:30:12,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742250_1426 (size=165) 2024-11-28T00:30:12,928 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:30:12,928 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6313816891ca7e96135d8701f76acf8}, {pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 97b1dc36b5143606cce7037de37fce7b}] 2024-11-28T00:30:12,929 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:12,929 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:13,021 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-28T00:30:13,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=181 2024-11-28T00:30:13,081 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=182 2024-11-28T00:30:13,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:13,081 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:13,081 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2902): Flushing d6313816891ca7e96135d8701f76acf8 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:30:13,081 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2902): Flushing 97b1dc36b5143606cce7037de37fce7b 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:30:13,100 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128082eb4a2b21a407aa361230d7da8b86f_d6313816891ca7e96135d8701f76acf8 is 71, key is 01c3db862f6594c3f55ccd2b83b4f47e/cf:q/1732753812876/Put/seqid=0 2024-11-28T00:30:13,101 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411282061ab530bf8464b9b8e7b0cbe84c68a_97b1dc36b5143606cce7037de37fce7b is 71, key is 1172ecaf779b97d15cf31cdaee57706d/cf:q/1732753812879/Put/seqid=0 2024-11-28T00:30:13,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742251_1427 (size=5102) 2024-11-28T00:30:13,106 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742251_1427 (size=5102) 2024-11-28T00:30:13,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742251_1427 (size=5102) 2024-11-28T00:30:13,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742252_1428 (size=8171) 2024-11-28T00:30:13,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742252_1428 (size=8171) 2024-11-28T00:30:13,107 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=181}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:13,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742252_1428 (size=8171) 2024-11-28T00:30:13,108 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:13,112 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128082eb4a2b21a407aa361230d7da8b86f_d6313816891ca7e96135d8701f76acf8 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241128082eb4a2b21a407aa361230d7da8b86f_d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:13,112 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411282061ab530bf8464b9b8e7b0cbe84c68a_97b1dc36b5143606cce7037de37fce7b to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411282061ab530bf8464b9b8e7b0cbe84c68a_97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:13,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/.tmp/cf/d123daccd4cd49c9945d0c2a926e156c, store: [table=testtb-testExportExpiredSnapshot family=cf region=d6313816891ca7e96135d8701f76acf8] 2024-11-28T00:30:13,113 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/.tmp/cf/d123daccd4cd49c9945d0c2a926e156c is 209, key is 0484e3325596a39978d1e89227bab3afe/cf:q/1732753812876/Put/seqid=0 2024-11-28T00:30:13,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/.tmp/cf/5b0aad60308245a6bcd60da37ed9fc69, store: [table=testtb-testExportExpiredSnapshot family=cf region=97b1dc36b5143606cce7037de37fce7b] 2024-11-28T00:30:13,114 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, 
pid=182}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/.tmp/cf/5b0aad60308245a6bcd60da37ed9fc69 is 209, key is 1c0a346e7909038fdcc8b375574574aa4/cf:q/1732753812879/Put/seqid=0 2024-11-28T00:30:13,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742253_1429 (size=5918) 2024-11-28T00:30:13,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742253_1429 (size=5918) 2024-11-28T00:30:13,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742253_1429 (size=5918) 2024-11-28T00:30:13,125 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/.tmp/cf/d123daccd4cd49c9945d0c2a926e156c 2024-11-28T00:30:13,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742254_1430 (size=14997) 2024-11-28T00:30:13,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742254_1430 (size=14997) 2024-11-28T00:30:13,126 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742254_1430 (size=14997) 2024-11-28T00:30:13,127 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/.tmp/cf/5b0aad60308245a6bcd60da37ed9fc69 2024-11-28T00:30:13,131 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/.tmp/cf/d123daccd4cd49c9945d0c2a926e156c as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/cf/d123daccd4cd49c9945d0c2a926e156c 2024-11-28T00:30:13,132 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/.tmp/cf/5b0aad60308245a6bcd60da37ed9fc69 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/cf/5b0aad60308245a6bcd60da37ed9fc69 2024-11-28T00:30:13,137 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 
{event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/cf/d123daccd4cd49c9945d0c2a926e156c, entries=3, sequenceid=6, filesize=5.8 K 2024-11-28T00:30:13,137 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/cf/5b0aad60308245a6bcd60da37ed9fc69, entries=47, sequenceid=6, filesize=14.6 K 2024-11-28T00:30:13,137 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for d6313816891ca7e96135d8701f76acf8 in 56ms, sequenceid=6, compaction requested=false 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-28T00:30:13,138 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 97b1dc36b5143606cce7037de37fce7b in 57ms, sequenceid=6, compaction requested=false 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportExpiredSnapshot' 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.HRegion(2603): Flush status journal for d6313816891ca7e96135d8701f76acf8: 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.HRegion(2603): Flush status journal for 97b1dc36b5143606cce7037de37fce7b: 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. for snaptb0-testExportExpiredSnapshot completed. 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. for snaptb0-testExportExpiredSnapshot completed. 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.' 
region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.' region-info for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/cf/d123daccd4cd49c9945d0c2a926e156c] hfiles 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/cf/d123daccd4cd49c9945d0c2a926e156c for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/cf/5b0aad60308245a6bcd60da37ed9fc69] hfiles 2024-11-28T00:30:13,138 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/cf/5b0aad60308245a6bcd60da37ed9fc69 for snapshot=snaptb0-testExportExpiredSnapshot 2024-11-28T00:30:13,145 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000002/launch_container.sh] 2024-11-28T00:30:13,145 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000002/container_tokens] 2024-11-28T00:30:13,145 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000002/sysfs] 2024-11-28T00:30:13,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742255_1431 (size=110) 2024-11-28T00:30:13,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742255_1431 (size=110) 2024-11-28T00:30:13,148 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742255_1431 (size=110) 2024-11-28T00:30:13,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:13,149 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=181}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=181 2024-11-28T00:30:13,150 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=181 2024-11-28T00:30:13,150 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:13,150 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=181, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:13,152 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=181, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure d6313816891ca7e96135d8701f76acf8 in 223 msec 2024-11-28T00:30:13,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742256_1432 (size=110) 2024-11-28T00:30:13,153 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742256_1432 (size=110) 2024-11-28T00:30:13,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742256_1432 (size=110) 2024-11-28T00:30:13,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 
2024-11-28T00:30:13,154 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=182}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=182 2024-11-28T00:30:13,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=182 2024-11-28T00:30:13,155 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportExpiredSnapshot on region 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:13,155 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=182, ppid=180, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:13,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=182, resume processing ppid=180 2024-11-28T00:30:13,158 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=182, ppid=180, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 97b1dc36b5143606cce7037de37fce7b in 228 msec 2024-11-28T00:30:13,158 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:30:13,159 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:30:13,160 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:30:13,160 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:30:13,160 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:13,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411282061ab530bf8464b9b8e7b0cbe84c68a_97b1dc36b5143606cce7037de37fce7b, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241128082eb4a2b21a407aa361230d7da8b86f_d6313816891ca7e96135d8701f76acf8] hfiles 2024-11-28T00:30:13,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411282061ab530bf8464b9b8e7b0cbe84c68a_97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:13,161 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241128082eb4a2b21a407aa361230d7da8b86f_d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:13,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742257_1433 (size=294) 2024-11-28T00:30:13,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742257_1433 (size=294) 2024-11-28T00:30:13,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742257_1433 (size=294) 2024-11-28T00:30:13,173 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:30:13,173 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportExpiredSnapshot 2024-11-28T00:30:13,174 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot 2024-11-28T00:30:13,181 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742258_1434 (size=963) 2024-11-28T00:30:13,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742258_1434 (size=963) 2024-11-28T00:30:13,186 INFO [PEWorker-4 {}] 
procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:30:13,192 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742258_1434 (size=963) 2024-11-28T00:30:13,193 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:30:13,193 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportExpiredSnapshot to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportExpiredSnapshot 2024-11-28T00:30:13,195 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=180, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:30:13,195 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 }, snapshot procedure id = 180 2024-11-28T00:30:13,196 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=180, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=180, snapshot={ ss=snaptb0-testExportExpiredSnapshot table=testtb-testExportExpiredSnapshot type=FLUSH ttl=0 } in 280 msec 2024-11-28T00:30:13,231 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=180 2024-11-28T00:30:13,231 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:30:13,232 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:30:13,234 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=183, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot 2024-11-28T00:30:13,235 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, 
state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:30:13,236 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testExportExpiredSnapshot" procId is: 183 2024-11-28T00:30:13,236 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:30:13,236 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-28T00:30:13,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742259_1435 (size=436) 2024-11-28T00:30:13,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742259_1435 (size=436) 2024-11-28T00:30:13,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742259_1435 (size=436) 2024-11-28T00:30:13,248 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 3790779e6552562e94070478b4a3d9dc, NAME => 'testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:13,248 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 76fb4953fe949fdbfff19f1d73203841, NAME => 'testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testExportExpiredSnapshot', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:13,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742260_1436 (size=61) 2024-11-28T00:30:13,258 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742261_1437 (size=61) 2024-11-28T00:30:13,259 DEBUG 
[RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:13,259 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1722): Closing 76fb4953fe949fdbfff19f1d73203841, disabling compactions & flushes 2024-11-28T00:30:13,259 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 2024-11-28T00:30:13,259 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 2024-11-28T00:30:13,259 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. after waiting 0 ms 2024-11-28T00:30:13,259 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 2024-11-28T00:30:13,260 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 2024-11-28T00:30:13,260 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-1 {}] regionserver.HRegion(1676): Region close journal for 76fb4953fe949fdbfff19f1d73203841: Waiting for close lock at 1732753813259Disabling compacts and flushes for region at 1732753813259Disabling writes for close at 1732753813259Writing region close event to WAL at 1732753813260 (+1 ms)Closed at 1732753813260 2024-11-28T00:30:13,260 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:13,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742261_1437 (size=61) 2024-11-28T00:30:13,260 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1722): Closing 3790779e6552562e94070478b4a3d9dc, disabling compactions & flushes 2024-11-28T00:30:13,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742260_1436 (size=61) 2024-11-28T00:30:13,260 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:30:13,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742261_1437 (size=61) 2024-11-28T00:30:13,260 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 
2024-11-28T00:30:13,260 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. after waiting 0 ms 2024-11-28T00:30:13,260 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:30:13,260 INFO [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:30:13,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742260_1436 (size=61) 2024-11-28T00:30:13,260 DEBUG [RegionOpenAndInit-testExportExpiredSnapshot-pool-0 {}] regionserver.HRegion(1676): Region close journal for 3790779e6552562e94070478b4a3d9dc: Waiting for close lock at 1732753813260Disabling compacts and flushes for region at 1732753813260Disabling writes for close at 1732753813260Writing region close event to WAL at 1732753813260Closed at 1732753813260 2024-11-28T00:30:13,261 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:30:13,262 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732753813261"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753813261"}]},"ts":"1732753813261"} 2024-11-28T00:30:13,262 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc.","families":{"info":[{"qualifier":"regioninfo","vlen":60,"tag":[],"timestamp":"1732753813261"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753813261"}]},"ts":"1732753813261"} 2024-11-28T00:30:13,265 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 
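
Note on the create request logged above: the descriptor for 'testExportExpiredSnapshot' enables MOB storage on column family 'cf' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1') and the table comes up with two regions split at row key '1'. As a minimal, illustrative sketch only (not the code the test actually runs; the configuration wiring and the single split key are assumptions read off the descriptor and region boundaries in the log), an equivalent table could be created through the public HBase 2.x client API like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateMobTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Column family 'cf' with MOB enabled and a 0-byte threshold,
      // mirroring IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1' in the logged descriptor.
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)
          .setMobThreshold(0L)
          .setMaxVersions(1)
          .build();
      TableDescriptor table = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testExportExpiredSnapshot"))
          .setColumnFamily(cf)
          .build();
      // One split at row key '1' yields the two regions ('' -> '1', '1' -> '') seen in the log.
      admin.createTable(table, new byte[][] { Bytes.toBytes("1") });
    }
  }
}
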
2024-11-28T00:30:13,266 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:30:13,266 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753813266"}]},"ts":"1732753813266"} 2024-11-28T00:30:13,268 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLING in hbase:meta 2024-11-28T00:30:13,268 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:30:13,269 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:30:13,269 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:30:13,269 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:30:13,269 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:30:13,269 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:30:13,269 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:30:13,270 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:30:13,270 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:30:13,270 INFO [PEWorker-2 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:30:13,270 DEBUG [PEWorker-2 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:30:13,270 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=184, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3790779e6552562e94070478b4a3d9dc, ASSIGN}, {pid=185, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=76fb4953fe949fdbfff19f1d73203841, ASSIGN}] 2024-11-28T00:30:13,271 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=185, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=76fb4953fe949fdbfff19f1d73203841, ASSIGN 2024-11-28T00:30:13,271 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=184, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3790779e6552562e94070478b4a3d9dc, ASSIGN 2024-11-28T00:30:13,272 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=185, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=76fb4953fe949fdbfff19f1d73203841, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:30:13,272 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=184, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, 
hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3790779e6552562e94070478b4a3d9dc, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:30:13,341 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-28T00:30:13,422 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 2024-11-28T00:30:13,423 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=184 updating hbase:meta row=3790779e6552562e94070478b4a3d9dc, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:13,423 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=185 updating hbase:meta row=76fb4953fe949fdbfff19f1d73203841, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:30:13,425 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=185, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=76fb4953fe949fdbfff19f1d73203841, ASSIGN because future has completed 2024-11-28T00:30:13,425 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=186, ppid=185, state=RUNNABLE, hasLock=false; OpenRegionProcedure 76fb4953fe949fdbfff19f1d73203841, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:30:13,426 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=184, ppid=183, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3790779e6552562e94070478b4a3d9dc, ASSIGN because future has completed 2024-11-28T00:30:13,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=187, ppid=184, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3790779e6552562e94070478b4a3d9dc, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:30:13,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-28T00:30:13,580 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 2024-11-28T00:30:13,580 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(7752): Opening region: {ENCODED => 76fb4953fe949fdbfff19f1d73203841, NAME => 'testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:30:13,580 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. service=AccessControlService 2024-11-28T00:30:13,581 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] handler.AssignRegionHandler(132): Open testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 
2024-11-28T00:30:13,581 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(7752): Opening region: {ENCODED => 3790779e6552562e94070478b4a3d9dc, NAME => 'testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(8280): Registered coprocessor service: region=testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. service=AccessControlService 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(7794): checking encryption for 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(7797): checking classloading for 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,581 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 
2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testExportExpiredSnapshot 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(898): Instantiated testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(7794): checking encryption for 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,581 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(7797): checking classloading for 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,582 INFO [StoreOpener-76fb4953fe949fdbfff19f1d73203841-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,582 INFO [StoreOpener-3790779e6552562e94070478b4a3d9dc-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,584 INFO [StoreOpener-3790779e6552562e94070478b4a3d9dc-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3790779e6552562e94070478b4a3d9dc columnFamilyName cf 2024-11-28T00:30:13,584 DEBUG [StoreOpener-3790779e6552562e94070478b4a3d9dc-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:13,585 INFO [StoreOpener-3790779e6552562e94070478b4a3d9dc-1 {}] regionserver.HStore(327): Store=3790779e6552562e94070478b4a3d9dc/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:13,585 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1038): replaying wal for 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,586 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,586 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,586 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1048): stopping wal replay for 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,586 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1060): Cleaning up temporary data for 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,588 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1093): writing seq id for 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,590 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:30:13,590 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1114): Opened 3790779e6552562e94070478b4a3d9dc; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=62954157, jitterRate=-0.061909958720207214}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:13,590 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,591 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegion(1006): Region open journal for 3790779e6552562e94070478b4a3d9dc: Running coprocessor pre-open hook at 1732753813581Writing region info on filesystem at 1732753813581Initializing all the Stores at 1732753813582 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753813582Cleaning up temporary data from old regions at 1732753813586 (+4 ms)Running coprocessor post-open hooks at 1732753813590 (+4 ms)Region opened successfully at 1732753813591 (+1 ms) 2024-11-28T00:30:13,592 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc., pid=187, masterSystemTime=1732753813578 2024-11-28T00:30:13,592 INFO [StoreOpener-76fb4953fe949fdbfff19f1d73203841-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76fb4953fe949fdbfff19f1d73203841 columnFamilyName cf 2024-11-28T00:30:13,593 DEBUG [StoreOpener-76fb4953fe949fdbfff19f1d73203841-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:13,594 INFO [StoreOpener-76fb4953fe949fdbfff19f1d73203841-1 {}] regionserver.HStore(327): Store=76fb4953fe949fdbfff19f1d73203841/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:13,594 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:30:13,594 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=187}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:30:13,594 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1038): replaying wal for 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,595 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=184 updating hbase:meta row=3790779e6552562e94070478b4a3d9dc, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:13,595 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,595 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,595 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1048): stopping wal replay for 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,595 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1060): Cleaning up temporary data for 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,597 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=187, ppid=184, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3790779e6552562e94070478b4a3d9dc, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:30:13,597 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] 
regionserver.HRegion(1093): writing seq id for 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=187, resume processing ppid=184 2024-11-28T00:30:13,599 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:30:13,599 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=187, ppid=184, state=SUCCESS, hasLock=false; OpenRegionProcedure 3790779e6552562e94070478b4a3d9dc, server=72c8c7f04009,43335,1732753506004 in 171 msec 2024-11-28T00:30:13,600 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1114): Opened 76fb4953fe949fdbfff19f1d73203841; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=70697858, jitterRate=0.053480178117752075}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:13,600 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:13,600 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegion(1006): Region open journal for 76fb4953fe949fdbfff19f1d73203841: Running coprocessor pre-open hook at 1732753813581Writing region info on filesystem at 1732753813581Initializing all the Stores at 1732753813582 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753813582Cleaning up temporary data from old regions at 1732753813595 (+13 ms)Running coprocessor post-open hooks at 1732753813600 (+5 ms)Region opened successfully at 1732753813600 2024-11-28T00:30:13,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=184, ppid=183, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=3790779e6552562e94070478b4a3d9dc, ASSIGN in 329 msec 2024-11-28T00:30:13,601 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegionServer(2236): Post open deploy tasks for testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841., pid=186, masterSystemTime=1732753813577 2024-11-28T00:30:13,602 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] regionserver.HRegionServer(2266): Finished post open deploy task for testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 2024-11-28T00:30:13,602 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=186}] handler.AssignRegionHandler(153): Opened testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 
2024-11-28T00:30:13,602 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=185 updating hbase:meta row=76fb4953fe949fdbfff19f1d73203841, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:30:13,604 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=186, ppid=185, state=RUNNABLE, hasLock=false; OpenRegionProcedure 76fb4953fe949fdbfff19f1d73203841, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:30:13,606 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=186, resume processing ppid=185 2024-11-28T00:30:13,606 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=186, ppid=185, state=SUCCESS, hasLock=false; OpenRegionProcedure 76fb4953fe949fdbfff19f1d73203841, server=72c8c7f04009,36427,1732753506332 in 180 msec 2024-11-28T00:30:13,608 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=185, resume processing ppid=183 2024-11-28T00:30:13,608 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=185, ppid=183, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testExportExpiredSnapshot, region=76fb4953fe949fdbfff19f1d73203841, ASSIGN in 336 msec 2024-11-28T00:30:13,608 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:30:13,609 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753813608"}]},"ts":"1732753813608"} 2024-11-28T00:30:13,610 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testExportExpiredSnapshot, state=ENABLED in hbase:meta 2024-11-28T00:30:13,611 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=183, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testExportExpiredSnapshot execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:30:13,611 DEBUG [PEWorker-2 {}] access.PermissionStorage(177): Writing permission with rowKey testExportExpiredSnapshot jenkins: RWXCA 2024-11-28T00:30:13,613 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:30:13,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:13,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:13,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:13,615 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:13,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:13,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:13,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:13,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:13,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:13,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:13,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF\x0AF\x0A\x07jenkins\x12;\x08\x03"7\x0A+\x0A\x07default\x12 testtb-testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:13,622 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:13,623 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=183, state=SUCCESS, hasLock=false; CreateTableProcedure table=testExportExpiredSnapshot in 388 msec 2024-11-28T00:30:13,861 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=183 2024-11-28T00:30:13,862 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testExportExpiredSnapshot completed 2024-11-28T00:30:13,862 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:13,864 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-28T00:30:13,864 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 
2024-11-28T00:30:13,865 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:30:13,866 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:13,871 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:13,877 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:13,886 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:30:13,889 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegion(8528): writing data to region testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:30:13,890 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:13,893 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testExportExpiredSnapshot 2024-11-28T00:30:13,893 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 
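
Note on the META scans above: the "Found 2 regions for table testExportExpiredSnapshot" checks come from the test utility scanning hbase:meta directly. From an ordinary client the same lookup is a single Admin call; the sketch below is an assumption-level example using the public API, with the same connection setup assumed as in the earlier sketch:

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Lists the table's regions; the log above reports two of them.
      List<RegionInfo> regions = admin.getRegions(TableName.valueOf("testExportExpiredSnapshot"));
      for (RegionInfo region : regions) {
        System.out.println(region.getRegionNameAsString());
      }
    }
  }
}
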
2024-11-28T00:30:13,893 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:30:13,895 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:13,900 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testExportExpiredSnapshot,, stopping at row=testExportExpiredSnapshot ,, for max=2147483647 with caching=100 2024-11-28T00:30:13,906 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-28T00:30:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snapshot-testExportExpiredSnapshot VERSION not specified, setting to 2 2024-11-28T00:30:13,906 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:30:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@424ce114, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:13,907 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:13,908 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:13,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:13,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:13,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69d59770, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:13,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:13,908 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:13,909 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:13,909 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47534, version=4.0.0-alpha-1-SNAPSHOT, 
sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:13,910 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1944a31f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:13,910 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:13,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:13,911 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:13,912 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39128, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:13,913 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:30:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:13,913 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:13,913 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
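
Note on the snapshot request logged above ({ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }): it asks for a FLUSH-type snapshot with a 10-second TTL, which is why the regions are flushed and their store files referenced in a manifest earlier in this log. A minimal client-side sketch follows; it uses the basic SnapshotDescription form and deliberately omits the ttl=10 property, since the client overload that carries snapshot properties such as TTL differs between HBase versions:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.SnapshotDescription;
import org.apache.hadoop.hbase.client.SnapshotType;

public class FlushSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // FLUSH-type snapshot: regions are flushed first, then their store files are
      // referenced, matching the flush/manifest steps visible earlier in this log.
      admin.snapshot(new SnapshotDescription(
          "snapshot-testExportExpiredSnapshot",
          TableName.valueOf("testExportExpiredSnapshot"),
          SnapshotType.FLUSH));
    }
  }
}
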
2024-11-28T00:30:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a2c7162, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:13,914 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:13,915 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:13,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:13,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:13,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35923f67, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:13,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:13,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:13,915 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:13,916 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47552, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:13,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2571a7cc, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:13,917 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:13,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:13,918 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:13,919 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39138, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:30:13,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testExportExpiredSnapshot', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:30:13,920 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:13,921 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33648, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:13,922 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:30:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:13,922 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:13,922 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:30:13,922 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testExportExpiredSnapshot], kv [jenkins: RWXCA] 2024-11-28T00:30:13,923 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:30:13,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } 2024-11-28T00:30:13,924 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 188 2024-11-28T00:30:13,925 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-28T00:30:13,925 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:30:13,926 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:30:13,928 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:30:13,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742262_1438 (size=152) 2024-11-28T00:30:13,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742262_1438 (size=152) 2024-11-28T00:30:13,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742262_1438 (size=152) 2024-11-28T00:30:13,934 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:30:13,934 INFO [PEWorker-1 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=189, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3790779e6552562e94070478b4a3d9dc}, {pid=190, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76fb4953fe949fdbfff19f1d73203841}] 2024-11-28T00:30:13,935 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:13,935 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:14,031 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-28T00:30:14,085 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=189 2024-11-28T00:30:14,086 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:30:14,086 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2902): Flushing 3790779e6552562e94070478b4a3d9dc 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:30:14,086 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=190 2024-11-28T00:30:14,087 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 
2024-11-28T00:30:14,087 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2902): Flushing 76fb4953fe949fdbfff19f1d73203841 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:30:14,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128750e77229e0548cd8643adf8479aba58_3790779e6552562e94070478b4a3d9dc is 71, key is 0382fa6ce1756c9a8eb8f45454207e40/cf:q/1732753813886/Put/seqid=0 2024-11-28T00:30:14,104 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128ade4695a5bae4f5395bbfa1fe6706acf_76fb4953fe949fdbfff19f1d73203841 is 71, key is 251432006de90d056bb2b9f521e343b4/cf:q/1732753813889/Put/seqid=0 2024-11-28T00:30:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742263_1439 (size=5102) 2024-11-28T00:30:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742263_1439 (size=5102) 2024-11-28T00:30:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742264_1440 (size=8172) 2024-11-28T00:30:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742264_1440 (size=8172) 2024-11-28T00:30:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742263_1439 (size=5102) 2024-11-28T00:30:14,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742264_1440 (size=8172) 2024-11-28T00:30:14,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-28T00:30:14,393 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:30:14,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:14,515 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:14,520 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128750e77229e0548cd8643adf8479aba58_3790779e6552562e94070478b4a3d9dc to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241128750e77229e0548cd8643adf8479aba58_3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:14,520 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128ade4695a5bae4f5395bbfa1fe6706acf_76fb4953fe949fdbfff19f1d73203841 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241128ade4695a5bae4f5395bbfa1fe6706acf_76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:14,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/.tmp/cf/765ecb3ef686497e81ea533a94602976, store: [table=testExportExpiredSnapshot family=cf region=76fb4953fe949fdbfff19f1d73203841] 2024-11-28T00:30:14,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/.tmp/cf/9a10f9408a354841bd82af62ef0569dc, store: [table=testExportExpiredSnapshot family=cf region=3790779e6552562e94070478b4a3d9dc] 2024-11-28T00:30:14,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/.tmp/cf/765ecb3ef686497e81ea533a94602976 is 202, key is 1c8c6c96b82274536531abb500c5cc450/cf:q/1732753813889/Put/seqid=0 2024-11-28T00:30:14,521 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/.tmp/cf/9a10f9408a354841bd82af62ef0569dc is 202, key is 0fa672fce3df6367c8dce6677d7dd9635/cf:q/1732753813886/Put/seqid=0 2024-11-28T00:30:14,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742266_1442 (size=14663) 2024-11-28T00:30:14,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742265_1441 (size=5890) 2024-11-28T00:30:14,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742265_1441 (size=5890) 2024-11-28T00:30:14,531 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742266_1442 (size=14663) 2024-11-28T00:30:14,531 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742266_1442 (size=14663) 2024-11-28T00:30:14,532 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/.tmp/cf/765ecb3ef686497e81ea533a94602976 2024-11-28T00:30:14,532 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=5, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/.tmp/cf/9a10f9408a354841bd82af62ef0569dc 2024-11-28T00:30:14,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742265_1441 (size=5890) 2024-11-28T00:30:14,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/.tmp/cf/765ecb3ef686497e81ea533a94602976 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/cf/765ecb3ef686497e81ea533a94602976 2024-11-28T00:30:14,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/.tmp/cf/9a10f9408a354841bd82af62ef0569dc as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/cf/9a10f9408a354841bd82af62ef0569dc 2024-11-28T00:30:14,541 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/cf/9a10f9408a354841bd82af62ef0569dc, entries=3, sequenceid=5, filesize=5.8 K 2024-11-28T00:30:14,541 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/cf/765ecb3ef686497e81ea533a94602976, entries=47, sequenceid=5, filesize=14.3 K 2024-11-28T00:30:14,542 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for 3790779e6552562e94070478b4a3d9dc in 456ms, sequenceid=5, compaction requested=false 2024-11-28T00:30:14,542 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-28T00:30:14,543 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 76fb4953fe949fdbfff19f1d73203841 in 455ms, sequenceid=5, compaction requested=false 2024-11-28T00:30:14,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testExportExpiredSnapshot' 2024-11-28T00:30:14,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.HRegion(2603): Flush status journal for 76fb4953fe949fdbfff19f1d73203841: 2024-11-28T00:30:14,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.HRegion(2603): Flush status journal for 3790779e6552562e94070478b4a3d9dc: 2024-11-28T00:30:14,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. for snapshot-testExportExpiredSnapshot completed. 2024-11-28T00:30:14,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(75): Snapshotting region testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. for snapshot-testExportExpiredSnapshot completed. 2024-11-28T00:30:14,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841.' region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-28T00:30:14,543 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:14,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/cf/765ecb3ef686497e81ea533a94602976] hfiles 2024-11-28T00:30:14,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/cf/765ecb3ef686497e81ea533a94602976 for snapshot=snapshot-testExportExpiredSnapshot 2024-11-28T00:30:14,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(241): Storing 'testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc.' 
region-info for snapshot=snapshot-testExportExpiredSnapshot 2024-11-28T00:30:14,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:14,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/cf/9a10f9408a354841bd82af62ef0569dc] hfiles 2024-11-28T00:30:14,544 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/cf/9a10f9408a354841bd82af62ef0569dc for snapshot=snapshot-testExportExpiredSnapshot 2024-11-28T00:30:14,551 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-28T00:30:14,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742267_1443 (size=103) 2024-11-28T00:30:14,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742267_1443 (size=103) 2024-11-28T00:30:14,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742268_1444 (size=103) 2024-11-28T00:30:14,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742268_1444 (size=103) 2024-11-28T00:30:14,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742267_1443 (size=103) 2024-11-28T00:30:14,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742268_1444 (size=103) 2024-11-28T00:30:14,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:30:14,555 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=189}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=189 2024-11-28T00:30:14,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 
2024-11-28T00:30:14,556 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=190}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=190 2024-11-28T00:30:14,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=189 2024-11-28T00:30:14,556 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:14,556 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=190 2024-11-28T00:30:14,556 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=189, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:14,556 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snapshot-testExportExpiredSnapshot on region 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:14,556 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=190, ppid=188, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:14,558 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=189, ppid=188, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3790779e6552562e94070478b4a3d9dc in 623 msec 2024-11-28T00:30:14,559 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=190, resume processing ppid=188 2024-11-28T00:30:14,559 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=190, ppid=188, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 76fb4953fe949fdbfff19f1d73203841 in 623 msec 2024-11-28T00:30:14,559 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:30:14,560 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:30:14,561 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:30:14,561 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:30:14,561 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:14,562 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241128ade4695a5bae4f5395bbfa1fe6706acf_76fb4953fe949fdbfff19f1d73203841, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241128750e77229e0548cd8643adf8479aba58_3790779e6552562e94070478b4a3d9dc] hfiles 2024-11-28T00:30:14,562 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/c4ca4238a0b923820dcc509a6f75849b20241128ade4695a5bae4f5395bbfa1fe6706acf_76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:30:14,562 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testExportExpiredSnapshot/1030f41967fbb659ab4c2a7a1774d313/cf/d41d8cd98f00b204e9800998ecf8427e20241128750e77229e0548cd8643adf8479aba58_3790779e6552562e94070478b4a3d9dc 2024-11-28T00:30:14,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742269_1445 (size=287) 2024-11-28T00:30:14,568 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742269_1445 (size=287) 2024-11-28T00:30:14,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742269_1445 (size=287) 2024-11-28T00:30:14,572 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:30:14,572 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snapshot-testExportExpiredSnapshot 2024-11-28T00:30:14,573 DEBUG [PEWorker-3 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-28T00:30:14,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742270_1446 (size=935) 2024-11-28T00:30:14,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742270_1446 (size=935) 2024-11-28T00:30:14,580 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742270_1446 (size=935) 2024-11-28T00:30:14,584 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:30:14,589 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:30:14,590 DEBUG [PEWorker-3 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snapshot-testExportExpiredSnapshot 2024-11-28T00:30:14,591 INFO [PEWorker-3 {}] procedure.SnapshotProcedure(134): pid=188, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:30:14,591 DEBUG [PEWorker-3 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 }, snapshot procedure id = 188 2024-11-28T00:30:14,592 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=188, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=188, snapshot={ ss=snapshot-testExportExpiredSnapshot table=testExportExpiredSnapshot type=FLUSH ttl=10 } in 668 msec 2024-11-28T00:30:15,061 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=188 2024-11-28T00:30:15,061 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testExportExpiredSnapshot completed 2024-11-28T00:30:15,195 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0007_000001 (auth:SIMPLE) from 127.0.0.1:55684 2024-11-28T00:30:15,207 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000001/launch_container.sh] 2024-11-28T00:30:15,207 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000001/container_tokens] 2024-11-28T00:30:15,207 WARN 
[ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0007/container_1732753513471_0007_01_000001/sysfs] 2024-11-28T00:30:15,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-28T00:30:15,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-28T00:30:15,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-28T00:30:15,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot Metrics about Tables on a single HBase RegionServer 2024-11-28T00:30:15,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion-1 2024-11-28T00:30:15,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithMergeRegion 2024-11-28T00:30:17,189 INFO [regionserver/72c8c7f04009:0.Chore.1 {}] regionserver.HRegionServer$PeriodicMemStoreFlusher(1763): MemstoreFlusherChore requesting flush of hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 
because 91195ca5c70c54af0f84245ffc432b72/l has an old edit so flush to free WALs after random delay 47642 ms 2024-11-28T00:30:21,044 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:30:25,069 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753825069 2024-11-28T00:30:25,069 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753825069, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753825069, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:25,115 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:25,115 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753825069, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753825069/.hbase-snapshot/.tmp/snapshot-testExportExpiredSnapshot 2024-11-28T00:30:25,127 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:30:25,129 ERROR [Time-limited test {}] util.AbstractHBaseTool(152): Error running command-line tool org.apache.hadoop.hbase.snapshot.SnapshotTTLExpiredException: TTL for snapshot 'snapshot-testExportExpiredSnapshot' has already expired. at org.apache.hadoop.hbase.snapshot.ExportSnapshot.verifySnapshot(ExportSnapshot.java:960) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1105) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportExpiredSnapshot(TestExportSnapshot.java:362) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T00:30:25,130 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,131 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=191, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=191 2024-11-28T00:30:25,134 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753825134"}]},"ts":"1732753825134"} 2024-11-28T00:30:25,136 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLING in hbase:meta 2024-11-28T00:30:25,136 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportExpiredSnapshot to state=DISABLING 2024-11-28T00:30:25,137 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=192, ppid=191, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot}] 2024-11-28T00:30:25,138 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, UNASSIGN}, {pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, UNASSIGN}] 2024-11-28T00:30:25,140 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, UNASSIGN 2024-11-28T00:30:25,140 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, UNASSIGN 2024-11-28T00:30:25,141 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=97b1dc36b5143606cce7037de37fce7b, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:30:25,142 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=193 updating hbase:meta row=d6313816891ca7e96135d8701f76acf8, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:25,143 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=194, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, UNASSIGN because future has completed 2024-11-28T00:30:25,144 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:30:25,144 INFO [PEWorker-3 {}] 
procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=195, ppid=194, state=RUNNABLE, hasLock=false; CloseRegionProcedure 97b1dc36b5143606cce7037de37fce7b, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:30:25,145 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=193, ppid=192, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, UNASSIGN because future has completed 2024-11-28T00:30:25,145 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:30:25,145 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=196, ppid=193, state=RUNNABLE, hasLock=false; CloseRegionProcedure d6313816891ca7e96135d8701f76acf8, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:30:25,241 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=191 2024-11-28T00:30:25,297 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(122): Close 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:25,297 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:30:25,297 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1722): Closing 97b1dc36b5143606cce7037de37fce7b, disabling compactions & flushes 2024-11-28T00:30:25,297 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:25,297 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 2024-11-28T00:30:25,297 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. after waiting 0 ms 2024-11-28T00:30:25,297 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 
2024-11-28T00:30:25,299 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(122): Close d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:25,299 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:30:25,299 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1722): Closing d6313816891ca7e96135d8701f76acf8, disabling compactions & flushes 2024-11-28T00:30:25,299 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1755): Closing region testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:25,299 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:25,299 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. after waiting 0 ms 2024-11-28T00:30:25,299 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:25,309 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:30:25,310 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:30:25,310 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b. 
2024-11-28T00:30:25,310 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] regionserver.HRegion(1676): Region close journal for 97b1dc36b5143606cce7037de37fce7b: Waiting for close lock at 1732753825297Running coprocessor pre-close hooks at 1732753825297Disabling compacts and flushes for region at 1732753825297Disabling writes for close at 1732753825297Writing region close event to WAL at 1732753825298 (+1 ms)Running coprocessor post-close hooks at 1732753825310 (+12 ms)Closed at 1732753825310 2024-11-28T00:30:25,312 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=195}] handler.UnassignRegionHandler(157): Closed 97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:25,312 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:30:25,313 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=194 updating hbase:meta row=97b1dc36b5143606cce7037de37fce7b, regionState=CLOSED 2024-11-28T00:30:25,313 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:30:25,313 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1973): Closed testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8. 2024-11-28T00:30:25,313 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] regionserver.HRegion(1676): Region close journal for d6313816891ca7e96135d8701f76acf8: Waiting for close lock at 1732753825299Running coprocessor pre-close hooks at 1732753825299Disabling compacts and flushes for region at 1732753825299Disabling writes for close at 1732753825299Writing region close event to WAL at 1732753825308 (+9 ms)Running coprocessor post-close hooks at 1732753825313 (+5 ms)Closed at 1732753825313 2024-11-28T00:30:25,314 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=195, ppid=194, state=RUNNABLE, hasLock=false; CloseRegionProcedure 97b1dc36b5143606cce7037de37fce7b, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:30:25,315 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=196}] handler.UnassignRegionHandler(157): Closed d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:25,315 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=193 updating hbase:meta row=d6313816891ca7e96135d8701f76acf8, regionState=CLOSED 2024-11-28T00:30:25,317 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=196, ppid=193, state=RUNNABLE, hasLock=false; CloseRegionProcedure d6313816891ca7e96135d8701f76acf8, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:30:25,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=195, resume processing ppid=194 2024-11-28T00:30:25,318 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=195, ppid=194, state=SUCCESS, hasLock=false; 
CloseRegionProcedure 97b1dc36b5143606cce7037de37fce7b, server=72c8c7f04009,36427,1732753506332 in 172 msec 2024-11-28T00:30:25,320 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=194, ppid=192, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=97b1dc36b5143606cce7037de37fce7b, UNASSIGN in 180 msec 2024-11-28T00:30:25,320 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=196, resume processing ppid=193 2024-11-28T00:30:25,320 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=196, ppid=193, state=SUCCESS, hasLock=false; CloseRegionProcedure d6313816891ca7e96135d8701f76acf8, server=72c8c7f04009,41621,1732753506207 in 173 msec 2024-11-28T00:30:25,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=193, resume processing ppid=192 2024-11-28T00:30:25,321 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=193, ppid=192, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportExpiredSnapshot, region=d6313816891ca7e96135d8701f76acf8, UNASSIGN in 182 msec 2024-11-28T00:30:25,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=192, resume processing ppid=191 2024-11-28T00:30:25,324 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=192, ppid=191, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportExpiredSnapshot in 186 msec 2024-11-28T00:30:25,325 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753825325"}]},"ts":"1732753825325"} 2024-11-28T00:30:25,327 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportExpiredSnapshot, state=DISABLED in hbase:meta 2024-11-28T00:30:25,327 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportExpiredSnapshot to state=DISABLED 2024-11-28T00:30:25,329 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=191, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportExpiredSnapshot in 197 msec 2024-11-28T00:30:25,451 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=191 2024-11-28T00:30:25,452 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:30:25,452 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,453 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,454 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=197, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,454 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,455 DEBUG 
[PEWorker-2 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=197, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,457 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,459 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:25,459 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:25,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,461 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/recovered.edits] 2024-11-28T00:30:25,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,461 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,462 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-28T00:30:25,462 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-28T00:30:25,462 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-28T00:30:25,462 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/recovered.edits] 
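The HFileArchiver entries show that "deleting" a region's files really means moving them under the cluster archive root: paths under data/default/<table>/<region> are re-created under archive/data/default/<table>/<region>. A short sketch of inspecting what ends up in the archive with the Hadoop FileSystem API; the HDFS URI and test-data directory are copied from this particular run and would differ elsewhere:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListArchivedRegionFiles {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Archive root as it appears in this log; run-specific.
        Path archiveRoot = new Path(
            "hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a"
            + "/archive/data/default/testtb-testExportExpiredSnapshot");
        FileSystem fs = archiveRoot.getFileSystem(conf);
        // Walk region -> family/recovered.edits -> file, printing what HFileArchiver moved.
        for (FileStatus region : fs.listStatus(archiveRoot)) {
          for (FileStatus dir : fs.listStatus(region.getPath())) {
            for (FileStatus file : fs.listStatus(dir.getPath())) {
              System.out.println(file.getPath());
            }
          }
        }
      }
    }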
2024-11-28T00:30:25,462 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportExpiredSnapshot with data PBUF 2024-11-28T00:30:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:25,465 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:25,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=197 2024-11-28T00:30:25,465 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:25,465 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:25,465 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:25,469 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/cf/5b0aad60308245a6bcd60da37ed9fc69 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/cf/5b0aad60308245a6bcd60da37ed9fc69 2024-11-28T00:30:25,469 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/cf/d123daccd4cd49c9945d0c2a926e156c to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/cf/d123daccd4cd49c9945d0c2a926e156c 2024-11-28T00:30:25,472 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8/recovered.edits/9.seqid 2024-11-28T00:30:25,472 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b/recovered.edits/9.seqid 2024-11-28T00:30:25,473 DEBUG [HFileArchiver-21 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:25,473 DEBUG [HFileArchiver-22 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportExpiredSnapshot/97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:25,473 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportExpiredSnapshot regions 2024-11-28T00:30:25,473 DEBUG [PEWorker-2 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-28T00:30:25,474 DEBUG [PEWorker-2 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf] 2024-11-28T00:30:25,477 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411282061ab530bf8464b9b8e7b0cbe84c68a_97b1dc36b5143606cce7037de37fce7b to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/c4ca4238a0b923820dcc509a6f75849b202411282061ab530bf8464b9b8e7b0cbe84c68a_97b1dc36b5143606cce7037de37fce7b 2024-11-28T00:30:25,478 DEBUG [PEWorker-2 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241128082eb4a2b21a407aa361230d7da8b86f_d6313816891ca7e96135d8701f76acf8 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f/cf/d41d8cd98f00b204e9800998ecf8427e20241128082eb4a2b21a407aa361230d7da8b86f_d6313816891ca7e96135d8701f76acf8 2024-11-28T00:30:25,479 DEBUG [PEWorker-2 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportExpiredSnapshot/3d21f28acea2939462cb18e5d9576f2f 2024-11-28T00:30:25,481 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=197, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,483 WARN [PEWorker-2 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportExpiredSnapshot from hbase:meta 2024-11-28T00:30:25,486 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportExpiredSnapshot' descriptor. 2024-11-28T00:30:25,487 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=197, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,487 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportExpiredSnapshot' from region states. 
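Once the region and MOB directories are archived, DeleteTableProcedure (pid=197 above) removes the region rows and the table state from hbase:meta and drops the table descriptor and ACL entries. From the client side the whole chain is a single blocking call issued after the disable shown earlier; a sketch reusing that Admin handle:

    // Reuses the Connection/Admin setup from the disable sketch above.
    static void dropTable(Admin admin) throws IOException {
      TableName table = TableName.valueOf("testtb-testExportExpiredSnapshot");
      if (admin.tableExists(table) && admin.isTableDisabled(table)) {
        // Blocks until DeleteTableProcedure finishes: regions archived,
        // hbase:meta rows removed, descriptor and ACL entries dropped.
        admin.deleteTable(table);
      }
    }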
2024-11-28T00:30:25,487 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753825487"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:25,487 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753825487"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:25,490 INFO [PEWorker-2 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:30:25,490 DEBUG [PEWorker-2 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => d6313816891ca7e96135d8701f76acf8, NAME => 'testtb-testExportExpiredSnapshot,,1732753811873.d6313816891ca7e96135d8701f76acf8.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 97b1dc36b5143606cce7037de37fce7b, NAME => 'testtb-testExportExpiredSnapshot,1,1732753811873.97b1dc36b5143606cce7037de37fce7b.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:30:25,490 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportExpiredSnapshot' as deleted. 2024-11-28T00:30:25,490 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportExpiredSnapshot","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753825490"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:25,492 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportExpiredSnapshot state from META 2024-11-28T00:30:25,493 DEBUG [PEWorker-2 {}] procedure.DeleteTableProcedure(133): Finished pid=197, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,494 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=197, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportExpiredSnapshot in 41 msec 2024-11-28T00:30:25,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,571 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=197 2024-11-28T00:30:25,572 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportExpiredSnapshot 2024-11-28T00:30:25,572 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportExpiredSnapshot completed 2024-11-28T00:30:25,580 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-28T00:30:25,582 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportExpiredSnapshot 2024-11-28T00:30:25,583 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snapshot-testExportExpiredSnapshot" type: DISABLED 2024-11-28T00:30:25,585 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snapshot-testExportExpiredSnapshot 2024-11-28T00:30:25,586 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportExpiredSnapshot" type: DISABLED 2024-11-28T00:30:25,587 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportExpiredSnapshot 2024-11-28T00:30:25,616 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportExpiredSnapshot Thread=797 (was 809), OpenFileDescriptor=771 (was 801), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=778 (was 952), ProcessCount=14 (was 14), AvailableMemoryMB=3254 (was 3241) - AvailableMemoryMB LEAK? - 2024-11-28T00:30:25,616 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-11-28T00:30:25,641 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=797, OpenFileDescriptor=771, MaxFileDescriptor=1048576, SystemLoadAverage=778, ProcessCount=14, AvailableMemoryMB=3251 2024-11-28T00:30:25,641 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=797 is superior to 500 2024-11-28T00:30:25,643 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:30:25,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:30:25,645 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:30:25,645 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testEmptyExportFileSystemState" procId is: 198 2024-11-28T00:30:25,646 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:30:25,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-28T00:30:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742271_1447 (size=448) 2024-11-28T00:30:25,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39705 is added to blk_1073742271_1447 (size=448) 2024-11-28T00:30:25,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742271_1447 (size=448) 2024-11-28T00:30:25,661 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => aa92e9cd35a1b1216e0e190631ad4f4b, NAME => 'testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:25,662 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 3a633d932d42bf1603d22a7430fcebad, NAME => 'testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testEmptyExportFileSystemState', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:25,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742272_1448 (size=73) 2024-11-28T00:30:25,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742272_1448 (size=73) 2024-11-28T00:30:25,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742272_1448 (size=73) 2024-11-28T00:30:25,679 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:25,679 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1722): Closing aa92e9cd35a1b1216e0e190631ad4f4b, disabling compactions & flushes 2024-11-28T00:30:25,679 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 
2024-11-28T00:30:25,679 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:25,679 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. after waiting 0 ms 2024-11-28T00:30:25,679 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:25,680 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:25,680 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-0 {}] regionserver.HRegion(1676): Region close journal for aa92e9cd35a1b1216e0e190631ad4f4b: Waiting for close lock at 1732753825679Disabling compacts and flushes for region at 1732753825679Disabling writes for close at 1732753825679Writing region close event to WAL at 1732753825680 (+1 ms)Closed at 1732753825680 2024-11-28T00:30:25,685 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742273_1449 (size=73) 2024-11-28T00:30:25,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742273_1449 (size=73) 2024-11-28T00:30:25,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742273_1449 (size=73) 2024-11-28T00:30:25,687 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:25,687 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1722): Closing 3a633d932d42bf1603d22a7430fcebad, disabling compactions & flushes 2024-11-28T00:30:25,687 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:25,687 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:25,687 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. after waiting 0 ms 2024-11-28T00:30:25,687 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 
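The CreateTableProcedure above lays out two regions for testtb-testEmptyExportFileSystemState from a descriptor with IS_MOB => 'true' and MOB_THRESHOLD => '0' on family cf, split at row key '1' (each region is instantiated once to write its filesystem layout and then immediately closed, which is what the init/close journals record). A sketch of building an equivalent descriptor with the HBase 2.x builder API; the helper the test actually uses is not shown in this log:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    static void createMobTable(Admin admin) throws java.io.IOException {
      ColumnFamilyDescriptor cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMobEnabled(true)   // IS_MOB => 'true'
          .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell is stored as a MOB
          .setMaxVersions(1)     // VERSIONS => '1'
          .build();
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
          .setColumnFamily(cf)
          .build();
      // Pre-split at '1' so two regions are created, matching the STARTKEY/ENDKEY pairs above.
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }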
2024-11-28T00:30:25,687 INFO [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:25,687 DEBUG [RegionOpenAndInit-testtb-testEmptyExportFileSystemState-pool-1 {}] regionserver.HRegion(1676): Region close journal for 3a633d932d42bf1603d22a7430fcebad: Waiting for close lock at 1732753825687Disabling compacts and flushes for region at 1732753825687Disabling writes for close at 1732753825687Writing region close event to WAL at 1732753825687Closed at 1732753825687 2024-11-28T00:30:25,689 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:30:25,689 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732753825689"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753825689"}]},"ts":"1732753825689"} 2024-11-28T00:30:25,689 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.","families":{"info":[{"qualifier":"regioninfo","vlen":72,"tag":[],"timestamp":"1732753825689"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753825689"}]},"ts":"1732753825689"} 2024-11-28T00:30:25,691 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:30:25,692 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:30:25,692 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753825692"}]},"ts":"1732753825692"} 2024-11-28T00:30:25,694 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLING in hbase:meta 2024-11-28T00:30:25,694 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:30:25,695 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:30:25,695 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:30:25,695 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:30:25,695 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:30:25,695 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:30:25,695 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:30:25,695 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:30:25,695 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:30:25,695 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:30:25,695 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): 
Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:30:25,696 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, ASSIGN}, {pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, ASSIGN}] 2024-11-28T00:30:25,697 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, ASSIGN 2024-11-28T00:30:25,698 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:30:25,698 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, ASSIGN 2024-11-28T00:30:25,699 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:30:25,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-28T00:30:25,848 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
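After the balancer picks target servers, one TransitRegionStateProcedure/OpenRegionProcedure pair per region moves it from OFFLINE to OPEN. A client normally does not track these procedures; it just waits for the table to become available. A small polling sketch built only on stable Admin calls (the test framework has its own wait helpers, which are not shown here):

    // Reuses the Admin handle from the earlier sketches.
    static void waitForTable(Admin admin) throws IOException, InterruptedException {
      TableName table = TableName.valueOf("testtb-testEmptyExportFileSystemState");
      long deadline = System.currentTimeMillis() + 60_000L;
      // isTableAvailable returns true once every region of the table is open on some region server.
      while (!admin.isTableAvailable(table)) {
        if (System.currentTimeMillis() > deadline) {
          throw new IllegalStateException("regions of " + table + " not assigned in time");
        }
        Thread.sleep(200);
      }
    }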
2024-11-28T00:30:25,849 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=199 updating hbase:meta row=aa92e9cd35a1b1216e0e190631ad4f4b, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:25,849 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=200 updating hbase:meta row=3a633d932d42bf1603d22a7430fcebad, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:25,851 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=199, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, ASSIGN because future has completed 2024-11-28T00:30:25,851 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=201, ppid=199, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:30:25,852 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=200, ppid=198, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, ASSIGN because future has completed 2024-11-28T00:30:25,852 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=202, ppid=200, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a633d932d42bf1603d22a7430fcebad, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:30:25,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-28T00:30:26,008 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:26,008 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(132): Open testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:26,008 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7752): Opening region: {ENCODED => aa92e9cd35a1b1216e0e190631ad4f4b, NAME => 'testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7752): Opening region: {ENCODED => 3a633d932d42bf1603d22a7430fcebad, NAME => 'testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 
service=AccessControlService 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. service=AccessControlService 2024-11-28T00:30:26,009 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:30:26,009 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testEmptyExportFileSystemState aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(898): Instantiated testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7794): checking encryption for 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7794): checking encryption for aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(7797): checking classloading for 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,009 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(7797): checking classloading for aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,011 INFO [StoreOpener-3a633d932d42bf1603d22a7430fcebad-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,014 INFO [StoreOpener-3a633d932d42bf1603d22a7430fcebad-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3a633d932d42bf1603d22a7430fcebad columnFamilyName cf 2024-11-28T00:30:26,014 DEBUG [StoreOpener-3a633d932d42bf1603d22a7430fcebad-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:26,015 INFO [StoreOpener-3a633d932d42bf1603d22a7430fcebad-1 {}] regionserver.HStore(327): Store=3a633d932d42bf1603d22a7430fcebad/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:26,015 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1038): replaying wal for 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,016 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,016 INFO [StoreOpener-aa92e9cd35a1b1216e0e190631ad4f4b-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,017 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,018 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1048): stopping wal replay for 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,018 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1060): Cleaning up temporary data for 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,019 INFO [StoreOpener-aa92e9cd35a1b1216e0e190631ad4f4b-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region aa92e9cd35a1b1216e0e190631ad4f4b columnFamilyName cf 
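The CompactionConfiguration lines dump the per-store compaction settings the new cf store starts with: min/max compact sizes, file-count bounds, selection ratios, the throttle point, and the major-compaction period and jitter. These are driven by ordinary hbase-site.xml keys; the sketch below maps the logged values to the key names I believe correspond to them, so treat the mapping as an assumption and verify it against your HBase version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    Configuration conf = HBaseConfiguration.create();
    // minFilesToCompact / maxFilesToCompact
    conf.setInt("hbase.hstore.compaction.min", 3);
    conf.setInt("hbase.hstore.compaction.max", 10);
    // selection ratios (peak and off-peak)
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
    // throttle point between small and large compactions, in bytes
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);
    // major compaction period (ms) and jitter
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);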
2024-11-28T00:30:26,019 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1093): writing seq id for 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,020 DEBUG [StoreOpener-aa92e9cd35a1b1216e0e190631ad4f4b-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:26,026 INFO [StoreOpener-aa92e9cd35a1b1216e0e190631ad4f4b-1 {}] regionserver.HStore(327): Store=aa92e9cd35a1b1216e0e190631ad4f4b/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:26,027 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1038): replaying wal for aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,028 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,028 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,029 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1048): stopping wal replay for aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,029 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1060): Cleaning up temporary data for aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,031 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1093): writing seq id for aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,045 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:30:26,045 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1114): Opened aa92e9cd35a1b1216e0e190631ad4f4b; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71247892, jitterRate=0.06167632341384888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:26,046 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1122): Running coprocessor post-open hooks for aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,046 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] wal.WALSplitUtil(410): Wrote 
file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:30:26,046 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1114): Opened 3a633d932d42bf1603d22a7430fcebad; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71338340, jitterRate=0.06302410364151001}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:26,046 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegion(1006): Region open journal for aa92e9cd35a1b1216e0e190631ad4f4b: Running coprocessor pre-open hook at 1732753826010Writing region info on filesystem at 1732753826010Initializing all the Stores at 1732753826015 (+5 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753826015Cleaning up temporary data from old regions at 1732753826029 (+14 ms)Running coprocessor post-open hooks at 1732753826046 (+17 ms)Region opened successfully at 1732753826046 2024-11-28T00:30:26,047 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,047 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegion(1006): Region open journal for 3a633d932d42bf1603d22a7430fcebad: Running coprocessor pre-open hook at 1732753826010Writing region info on filesystem at 1732753826010Initializing all the Stores at 1732753826010Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753826010Cleaning up temporary data from old regions at 1732753826018 (+8 ms)Running coprocessor post-open hooks at 1732753826047 (+29 ms)Region opened successfully at 1732753826047 2024-11-28T00:30:26,047 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b., pid=201, masterSystemTime=1732753826004 2024-11-28T00:30:26,048 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad., pid=202, masterSystemTime=1732753826006 2024-11-28T00:30:26,051 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=199 updating hbase:meta row=aa92e9cd35a1b1216e0e190631ad4f4b, regionState=OPEN, openSeqNum=2, 
regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:26,053 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:26,053 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=201}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:26,053 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:26,054 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=202}] handler.AssignRegionHandler(153): Opened testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:26,054 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=200 updating hbase:meta row=3a633d932d42bf1603d22a7430fcebad, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:26,055 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=201, ppid=199, state=RUNNABLE, hasLock=false; OpenRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:30:26,057 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=202, ppid=200, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3a633d932d42bf1603d22a7430fcebad, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:30:26,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=201, resume processing ppid=199 2024-11-28T00:30:26,059 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=201, ppid=199, state=SUCCESS, hasLock=false; OpenRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b, server=72c8c7f04009,43335,1732753506004 in 205 msec 2024-11-28T00:30:26,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=202, resume processing ppid=200 2024-11-28T00:30:26,061 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=202, ppid=200, state=SUCCESS, hasLock=false; OpenRegionProcedure 3a633d932d42bf1603d22a7430fcebad, server=72c8c7f04009,41621,1732753506207 in 207 msec 2024-11-28T00:30:26,061 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=199, ppid=198, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, ASSIGN in 364 msec 2024-11-28T00:30:26,064 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=200, resume processing ppid=198 2024-11-28T00:30:26,064 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=200, ppid=198, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, ASSIGN in 366 msec 2024-11-28T00:30:26,065 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; 
CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:30:26,065 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753826065"}]},"ts":"1732753826065"} 2024-11-28T00:30:26,067 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=ENABLED in hbase:meta 2024-11-28T00:30:26,068 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=198, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testEmptyExportFileSystemState execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:30:26,068 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testEmptyExportFileSystemState jenkins: RWXCA 2024-11-28T00:30:26,072 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:30:26,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:26,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:26,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:26,074 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:26,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:26,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:26,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:26,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:26,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data 
PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:26,078 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:26,079 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF\x0AK\x0A\x07jenkins\x12@\x08\x03"<\x0A0\x0A\x07default\x12%testtb-testEmptyExportFileSystemState \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:26,079 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:26,081 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=198, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testEmptyExportFileSystemState in 435 msec 2024-11-28T00:30:26,271 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=198 2024-11-28T00:30:26,272 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:30:26,272 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:30:26,275 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-28T00:30:26,275 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:26,275 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:30:26,278 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:30:26,285 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:30:26,292 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:30:26,296 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:30:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753826296 (current time:1732753826296). 
2024-11-28T00:30:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:30:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-28T00:30:26,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:30:26,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@294ce2a9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:26,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:26,297 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:26,299 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:26,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:26,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:26,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@73aee148, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:26,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:26,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:26,300 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:26,301 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46762, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:26,302 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@17ebb5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:26,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:26,303 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:26,303 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:26,307 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60224, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:26,308 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 2024-11-28T00:30:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:26,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:26,309 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:26,310 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:30:26,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5ce7b926, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:26,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:26,325 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:26,326 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:26,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:26,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:26,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b891dc5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:26,326 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:26,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:26,327 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:26,328 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46780, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:26,328 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24cc0b68, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:26,329 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:26,330 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:26,331 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:26,332 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60232, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:30:26,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:30:26,334 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:26,336 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49600, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:26,338 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 2024-11-28T00:30:26,338 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:26,338 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:26,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:26,339 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:30:26,339 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:30:26,340 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:30:26,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:30:26,342 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-28T00:30:26,343 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:30:26,344 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:30:26,345 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:30:26,348 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:30:26,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742274_1450 (size=185) 2024-11-28T00:30:26,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742274_1450 (size=185) 2024-11-28T00:30:26,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742274_1450 (size=185) 2024-11-28T00:30:26,381 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState 
type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:30:26,381 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b}, {pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a633d932d42bf1603d22a7430fcebad}] 2024-11-28T00:30:26,382 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,383 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,452 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:30:26,535 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=204 2024-11-28T00:30:26,535 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:26,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.HRegion(2603): Flush status journal for aa92e9cd35a1b1216e0e190631ad4f4b: 2024-11-28T00:30:26,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-28T00:30:26,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:26,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:26,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:30:26,536 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=205 2024-11-28T00:30:26,536 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 
2024-11-28T00:30:26,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.HRegion(2603): Flush status journal for 3a633d932d42bf1603d22a7430fcebad: 2024-11-28T00:30:26,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. for emptySnaptb0-testEmptyExportFileSystemState completed. 2024-11-28T00:30:26,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.' region-info for snapshot=emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:26,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:26,537 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:30:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742275_1451 (size=76) 2024-11-28T00:30:26,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742275_1451 (size=76) 2024-11-28T00:30:26,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 
2024-11-28T00:30:26,546 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=204}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=204 2024-11-28T00:30:26,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742275_1451 (size=76) 2024-11-28T00:30:26,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742276_1452 (size=76) 2024-11-28T00:30:26,547 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=204 2024-11-28T00:30:26,547 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742276_1452 (size=76) 2024-11-28T00:30:26,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742276_1452 (size=76) 2024-11-28T00:30:26,547 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=204, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:26,549 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=204, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b in 167 msec 2024-11-28T00:30:26,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:30:26,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 
2024-11-28T00:30:26,948 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=205}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=205 2024-11-28T00:30:26,948 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=205 2024-11-28T00:30:26,948 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testEmptyExportFileSystemState on region 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,949 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=205, ppid=203, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:26,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=205, resume processing ppid=203 2024-11-28T00:30:26,952 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=205, ppid=203, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3a633d932d42bf1603d22a7430fcebad in 569 msec 2024-11-28T00:30:26,952 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:30:26,952 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:30:26,953 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:30:26,953 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:30:26,954 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:26,954 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:30:26,959 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742277_1453 (size=68) 2024-11-28T00:30:26,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742277_1453 (size=68) 2024-11-28T00:30:26,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742277_1453 (size=68) 2024-11-28T00:30:26,961 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:30:26,961 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:26,961 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:26,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742278_1454 (size=673) 2024-11-28T00:30:26,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742278_1454 (size=673) 2024-11-28T00:30:26,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742278_1454 (size=673) 2024-11-28T00:30:26,970 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:30:26,974 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:30:26,978 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:30:26,979 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:26,980 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=203, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:30:26,980 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 203 2024-11-28T00:30:26,981 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=203, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=203, snapshot={ ss=emptySnaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 640 msec 2024-11-28T00:30:27,072 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testEmptyExportFileSystemState' 2024-11-28T00:30:27,481 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=203 2024-11-28T00:30:27,481 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:30:27,488 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:30:27,490 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41621 {}] regionserver.HRegion(8528): writing data to region testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:30:27,491 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:30:27,494 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testEmptyExportFileSystemState 2024-11-28T00:30:27,494 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 
2024-11-28T00:30:27,494 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:30:27,495 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:30:27,500 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:30:27,505 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testEmptyExportFileSystemState,, stopping at row=testtb-testEmptyExportFileSystemState ,, for max=2147483647 with caching=100 2024-11-28T00:30:27,507 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:30:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753827507 (current time:1732753827507). 2024-11-28T00:30:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:30:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testEmptyExportFileSystemState VERSION not specified, setting to 2 2024-11-28T00:30:27,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:30:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d15a83b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:27,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:27,509 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:27,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:27,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:27,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2fde7890, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:27,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:27,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:27,509 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:27,510 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46802, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:27,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5961fb1b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:27,511 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:27,511 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:27,512 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:27,512 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60242, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:27,513 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 
2024-11-28T00:30:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:27,513 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:27,513 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:30:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@50720cda, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:27,514 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:27,515 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:27,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:27,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:27,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1825024f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:27,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:27,515 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:27,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:27,516 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46820, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:27,516 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a1829b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:27,517 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:27,517 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:27,518 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:27,518 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60258, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:27,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testEmptyExportFileSystemState', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:30:27,520 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:27,520 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49602, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:27,521 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 
2024-11-28T00:30:27,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:27,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:27,521 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:27,521 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:30:27,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testEmptyExportFileSystemState], kv [jenkins: RWXCA] 2024-11-28T00:30:27,522 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:30:27,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } 2024-11-28T00:30:27,523 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-11-28T00:30:27,524 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-28T00:30:27,524 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:30:27,525 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:30:27,527 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:30:27,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742279_1455 (size=180) 2024-11-28T00:30:27,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742279_1455 (size=180) 2024-11-28T00:30:27,533 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742279_1455 (size=180) 2024-11-28T00:30:27,534 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:30:27,534 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=207, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b}, {pid=208, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a633d932d42bf1603d22a7430fcebad}] 2024-11-28T00:30:27,535 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:27,535 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 
3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:27,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-28T00:30:27,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=208 2024-11-28T00:30:27,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:27,687 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=207 2024-11-28T00:30:27,687 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2902): Flushing 3a633d932d42bf1603d22a7430fcebad 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-28T00:30:27,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:27,688 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2902): Flushing aa92e9cd35a1b1216e0e190631ad4f4b 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-28T00:30:27,708 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ab67adca96c7401b8f5e8961d29ac3f2_aa92e9cd35a1b1216e0e190631ad4f4b is 71, key is 022a8df0dd6d073612ed6fa2226f4488/cf:q/1732753827488/Put/seqid=0 2024-11-28T00:30:27,712 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128695af86e56924132810588f5c22423d6_3a633d932d42bf1603d22a7430fcebad is 71, key is 1444fe25dd50f9d6b8d29b60a817a869/cf:q/1732753827490/Put/seqid=0 2024-11-28T00:30:27,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742281_1457 (size=8241) 2024-11-28T00:30:27,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742281_1457 (size=8241) 2024-11-28T00:30:27,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742281_1457 (size=8241) 2024-11-28T00:30:27,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742280_1456 (size=5032) 2024-11-28T00:30:27,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742280_1456 (size=5032) 
2024-11-28T00:30:27,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742280_1456 (size=5032) 2024-11-28T00:30:27,722 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:27,724 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:27,735 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128ab67adca96c7401b8f5e8961d29ac3f2_aa92e9cd35a1b1216e0e190631ad4f4b to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241128ab67adca96c7401b8f5e8961d29ac3f2_aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:27,736 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128695af86e56924132810588f5c22423d6_3a633d932d42bf1603d22a7430fcebad to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241128695af86e56924132810588f5c22423d6_3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:27,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/.tmp/cf/e71ccb6f738a4575b4f727a7f12a61d3, store: [table=testtb-testEmptyExportFileSystemState family=cf region=3a633d932d42bf1603d22a7430fcebad] 2024-11-28T00:30:27,737 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/.tmp/cf/e71ccb6f738a4575b4f727a7f12a61d3 is 214, key is 1a13c36e1f5f17fb3914cd6d1f1123c4c/cf:q/1732753827490/Put/seqid=0 2024-11-28T00:30:27,744 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/.tmp/cf/d69f1a38f72444b4beaadba0919342cd, store: [table=testtb-testEmptyExportFileSystemState family=cf region=aa92e9cd35a1b1216e0e190631ad4f4b] 
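The mobdir/ paths, HMobStore renames, and DefaultMobStoreFlusher entries above indicate the cf family is MOB-enabled. A hedged sketch of how such a family is typically declared; the table and family names come from the log, while the threshold value is purely an assumption for illustration:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class MobFamilySketch {
  // Builds a descriptor similar in spirit to the test table above.
  public static TableDescriptor testTableDescriptor() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("testtb-testEmptyExportFileSystemState"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
            .setMobEnabled(true)           // cells above the threshold are written as MOB files under mobdir/
            .setMobThreshold(100 * 1024L)  // assumed 100 KB threshold, not a value read from the log
            .build())
        .build();
  }
}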
2024-11-28T00:30:27,745 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/.tmp/cf/d69f1a38f72444b4beaadba0919342cd is 214, key is 0a55dde82befbaaab8c589c1aa7a8632c/cf:q/1732753827488/Put/seqid=0 2024-11-28T00:30:27,753 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742282_1458 (size=15447) 2024-11-28T00:30:27,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742282_1458 (size=15447) 2024-11-28T00:30:27,755 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742282_1458 (size=15447) 2024-11-28T00:30:27,755 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/.tmp/cf/e71ccb6f738a4575b4f727a7f12a61d3 2024-11-28T00:30:27,761 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/.tmp/cf/e71ccb6f738a4575b4f727a7f12a61d3 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/cf/e71ccb6f738a4575b4f727a7f12a61d3 2024-11-28T00:30:27,766 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/cf/e71ccb6f738a4575b4f727a7f12a61d3, entries=48, sequenceid=6, filesize=15.1 K 2024-11-28T00:30:27,767 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 3a633d932d42bf1603d22a7430fcebad in 80ms, sequenceid=6, compaction requested=false 2024-11-28T00:30:27,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.HRegion(2603): Flush status journal for 3a633d932d42bf1603d22a7430fcebad: 2024-11-28T00:30:27,767 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. for snaptb0-testEmptyExportFileSystemState completed. 
2024-11-28T00:30:27,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.' region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:27,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/cf/e71ccb6f738a4575b4f727a7f12a61d3] hfiles 2024-11-28T00:30:27,768 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/cf/e71ccb6f738a4575b4f727a7f12a61d3 for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742283_1459 (size=5724) 2024-11-28T00:30:27,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742283_1459 (size=5724) 2024-11-28T00:30:27,773 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742283_1459 (size=5724) 2024-11-28T00:30:27,774 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/.tmp/cf/d69f1a38f72444b4beaadba0919342cd 2024-11-28T00:30:27,779 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742284_1460 (size=115) 2024-11-28T00:30:27,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742284_1460 (size=115) 2024-11-28T00:30:27,780 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/.tmp/cf/d69f1a38f72444b4beaadba0919342cd as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/cf/d69f1a38f72444b4beaadba0919342cd 2024-11-28T00:30:27,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742284_1460 (size=115) 2024-11-28T00:30:27,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 
{event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:27,783 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=208}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=208 2024-11-28T00:30:27,784 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=208 2024-11-28T00:30:27,784 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:27,784 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=208, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:27,786 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/cf/d69f1a38f72444b4beaadba0919342cd, entries=2, sequenceid=6, filesize=5.6 K 2024-11-28T00:30:27,786 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=208, ppid=206, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 3a633d932d42bf1603d22a7430fcebad in 251 msec 2024-11-28T00:30:27,787 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for aa92e9cd35a1b1216e0e190631ad4f4b in 99ms, sequenceid=6, compaction requested=false 2024-11-28T00:30:27,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.HRegion(2603): Flush status journal for aa92e9cd35a1b1216e0e190631ad4f4b: 2024-11-28T00:30:27,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. for snaptb0-testEmptyExportFileSystemState completed. 2024-11-28T00:30:27,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(241): Storing 'testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.' 
region-info for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:27,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/cf/d69f1a38f72444b4beaadba0919342cd] hfiles 2024-11-28T00:30:27,787 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/cf/d69f1a38f72444b4beaadba0919342cd for snapshot=snaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742285_1461 (size=115) 2024-11-28T00:30:27,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742285_1461 (size=115) 2024-11-28T00:30:27,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742285_1461 (size=115) 2024-11-28T00:30:27,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 
2024-11-28T00:30:27,804 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=207}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=207 2024-11-28T00:30:27,804 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=207 2024-11-28T00:30:27,804 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testEmptyExportFileSystemState on region aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:27,804 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=207, ppid=206, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:27,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=207, resume processing ppid=206 2024-11-28T00:30:27,807 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:30:27,807 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=207, ppid=206, state=SUCCESS, hasLock=false; SnapshotRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b in 271 msec 2024-11-28T00:30:27,807 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:30:27,808 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:30:27,808 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:30:27,809 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:27,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241128695af86e56924132810588f5c22423d6_3a633d932d42bf1603d22a7430fcebad, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241128ab67adca96c7401b8f5e8961d29ac3f2_aa92e9cd35a1b1216e0e190631ad4f4b] hfiles 2024-11-28T00:30:27,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241128695af86e56924132810588f5c22423d6_3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:27,810 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241128ab67adca96c7401b8f5e8961d29ac3f2_aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:27,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742286_1462 (size=299) 2024-11-28T00:30:27,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742286_1462 (size=299) 2024-11-28T00:30:27,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742286_1462 (size=299) 2024-11-28T00:30:27,817 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:30:27,817 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,817 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,827 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742287_1463 (size=983) 2024-11-28T00:30:27,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742287_1463 (size=983) 
2024-11-28T00:30:27,828 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742287_1463 (size=983) 2024-11-28T00:30:27,831 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:30:27,836 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:30:27,837 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testEmptyExportFileSystemState to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,838 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=206, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:30:27,838 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 }, snapshot procedure id = 206 2024-11-28T00:30:27,839 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=206, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=206, snapshot={ ss=snaptb0-testEmptyExportFileSystemState table=testtb-testEmptyExportFileSystemState type=FLUSH ttl=0 } in 316 msec 2024-11-28T00:30:27,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=206 2024-11-28T00:30:27,841 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:30:27,841 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841 2024-11-28T00:30:27,841 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:27,870 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, 
inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:27,870 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,872 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:30:27,875 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841/.hbase-snapshot/.tmp/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:27,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742288_1464 (size=185) 2024-11-28T00:30:27,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742289_1465 (size=673) 2024-11-28T00:30:27,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742289_1465 (size=673) 2024-11-28T00:30:27,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742289_1465 (size=673) 2024-11-28T00:30:27,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742288_1464 (size=185) 2024-11-28T00:30:27,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742288_1464 (size=185) 2024-11-28T00:30:27,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:27,886 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:27,887 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-6820785954001262104.jar 2024-11-28T00:30:28,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,884 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-6752577774917858287.jar 2024-11-28T00:30:28,950 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,951 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:28,952 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:30:28,952 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:30:28,952 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:30:28,952 DEBUG [Time-limited test 
{}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:30:28,952 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:30:28,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:30:28,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:30:28,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:30:28,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:30:28,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:30:28,953 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:30:28,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:30:28,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:30:28,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:30:28,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-28T00:30:28,954 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:30:28,955 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:30:28,955 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:30:29,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742290_1466 (size=6424743) 2024-11-28T00:30:29,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742290_1466 (size=6424743) 2024-11-28T00:30:29,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742290_1466 (size=6424743) 2024-11-28T00:30:29,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742291_1467 (size=131440) 2024-11-28T00:30:29,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742291_1467 (size=131440) 2024-11-28T00:30:29,028 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742291_1467 (size=131440) 2024-11-28T00:30:29,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742292_1468 (size=4188619) 2024-11-28T00:30:29,043 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742292_1468 (size=4188619) 2024-11-28T00:30:29,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742292_1468 (size=4188619) 2024-11-28T00:30:29,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742293_1469 (size=440957) 2024-11-28T00:30:29,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742293_1469 (size=440957) 2024-11-28T00:30:29,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742293_1469 (size=440957) 2024-11-28T00:30:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742294_1470 (size=1323991) 2024-11-28T00:30:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742294_1470 (size=1323991) 2024-11-28T00:30:29,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45909 is added to blk_1073742294_1470 (size=1323991) 2024-11-28T00:30:29,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742295_1471 (size=903927) 2024-11-28T00:30:29,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742295_1471 (size=903927) 2024-11-28T00:30:29,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742295_1471 (size=903927) 2024-11-28T00:30:29,098 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742296_1472 (size=8360083) 2024-11-28T00:30:29,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742296_1472 (size=8360083) 2024-11-28T00:30:29,099 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742296_1472 (size=8360083) 2024-11-28T00:30:29,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742297_1473 (size=1877034) 2024-11-28T00:30:29,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742297_1473 (size=1877034) 2024-11-28T00:30:29,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742297_1473 (size=1877034) 2024-11-28T00:30:29,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742298_1474 (size=77835) 2024-11-28T00:30:29,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742298_1474 (size=77835) 2024-11-28T00:30:29,121 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742298_1474 (size=77835) 2024-11-28T00:30:29,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742299_1475 (size=30949) 2024-11-28T00:30:29,127 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742299_1475 (size=30949) 2024-11-28T00:30:29,128 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742299_1475 (size=30949) 2024-11-28T00:30:29,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742300_1476 (size=1597213) 2024-11-28T00:30:29,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742300_1476 (size=1597213) 2024-11-28T00:30:29,143 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742300_1476 (size=1597213) 2024-11-28T00:30:29,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742301_1477 (size=4695811) 2024-11-28T00:30:29,161 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742301_1477 (size=4695811) 2024-11-28T00:30:29,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742301_1477 (size=4695811) 2024-11-28T00:30:29,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742302_1478 (size=232957) 2024-11-28T00:30:29,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742302_1478 (size=232957) 2024-11-28T00:30:29,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742302_1478 (size=232957) 2024-11-28T00:30:29,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742303_1479 (size=127628) 2024-11-28T00:30:29,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742303_1479 (size=127628) 2024-11-28T00:30:29,183 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742303_1479 (size=127628) 2024-11-28T00:30:29,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742304_1480 (size=20406) 2024-11-28T00:30:29,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742304_1480 (size=20406) 2024-11-28T00:30:29,190 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742304_1480 (size=20406) 2024-11-28T00:30:29,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742305_1481 (size=5175431) 2024-11-28T00:30:29,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742305_1481 (size=5175431) 2024-11-28T00:30:29,209 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742305_1481 (size=5175431) 2024-11-28T00:30:29,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742306_1482 (size=217634) 2024-11-28T00:30:29,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742306_1482 (size=217634) 2024-11-28T00:30:29,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742306_1482 (size=217634) 2024-11-28T00:30:29,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742307_1483 (size=1832290) 2024-11-28T00:30:29,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742307_1483 (size=1832290) 2024-11-28T00:30:29,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742307_1483 (size=1832290) 2024-11-28T00:30:29,234 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742308_1484 (size=322274) 2024-11-28T00:30:29,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742308_1484 (size=322274) 2024-11-28T00:30:29,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742308_1484 (size=322274) 2024-11-28T00:30:29,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742309_1485 (size=503880) 2024-11-28T00:30:29,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742309_1485 (size=503880) 2024-11-28T00:30:29,242 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742309_1485 (size=503880) 2024-11-28T00:30:29,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742310_1486 (size=29229) 2024-11-28T00:30:29,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742310_1486 (size=29229) 2024-11-28T00:30:29,250 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742310_1486 (size=29229) 2024-11-28T00:30:29,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742311_1487 (size=24096) 2024-11-28T00:30:29,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742311_1487 (size=24096) 2024-11-28T00:30:29,255 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742311_1487 (size=24096) 2024-11-28T00:30:29,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742312_1488 (size=111872) 2024-11-28T00:30:29,261 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742312_1488 (size=111872) 2024-11-28T00:30:29,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742312_1488 (size=111872) 2024-11-28T00:30:29,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742313_1489 (size=45609) 2024-11-28T00:30:29,267 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742313_1489 (size=45609) 2024-11-28T00:30:29,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742313_1489 (size=45609) 2024-11-28T00:30:29,273 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742314_1490 (size=136454) 2024-11-28T00:30:29,274 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742314_1490 (size=136454) 2024-11-28T00:30:29,274 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742314_1490 (size=136454) 2024-11-28T00:30:29,275 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:30:29,276 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'emptySnaptb0-testEmptyExportFileSystemState' hfile list 2024-11-28T00:30:29,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742315_1491 (size=7) 2024-11-28T00:30:29,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742315_1491 (size=7) 2024-11-28T00:30:29,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742315_1491 (size=7) 2024-11-28T00:30:29,287 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742316_1492 (size=10) 2024-11-28T00:30:29,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742316_1492 (size=10) 2024-11-28T00:30:29,288 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742316_1492 (size=10) 2024-11-28T00:30:29,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742317_1493 (size=303899) 2024-11-28T00:30:29,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742317_1493 (size=303899) 2024-11-28T00:30:29,300 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742317_1493 (size=303899) 2024-11-28T00:30:29,329 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:30:29,329 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:30:30,103 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0008_000001 (auth:SIMPLE) from 127.0.0.1:54994 2024-11-28T00:30:30,812 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:30:33,956 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
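The surrounding entries show the MapReduce-based export of emptySnaptb0-testEmptyExportFileSystemState (jar staging, job submission, and the eventual "Export Completed" line). A minimal sketch of how ExportSnapshot is typically driven through ToolRunner; the destination URI is a placeholder and the option spellings follow the documented command-line usage, so verify them against the installed HBase version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Copies the snapshot manifest and referenced hfiles to the target filesystem,
    // as the job in the log does for the export-test/export-... directory.
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "--snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
        "--copy-to", "hdfs://namenode:8020/hbase-export-target",  // assumed destination
        "--mappers", "2"
    });
    System.exit(rc);
  }
}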
2024-11-28T00:30:35,389 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0008_000001 (auth:SIMPLE) from 127.0.0.1:56712 2024-11-28T00:30:35,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-28T00:30:35,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState Metrics about Tables on a single HBase RegionServer 2024-11-28T00:30:35,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742318_1494 (size=349573) 2024-11-28T00:30:35,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742318_1494 (size=349573) 2024-11-28T00:30:35,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742318_1494 (size=349573) 2024-11-28T00:30:36,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742319_1495 (size=8568) 2024-11-28T00:30:36,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742319_1495 (size=8568) 2024-11-28T00:30:36,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742319_1495 (size=8568) 2024-11-28T00:30:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742320_1496 (size=460) 2024-11-28T00:30:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742320_1496 (size=460) 2024-11-28T00:30:36,527 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742320_1496 (size=460) 2024-11-28T00:30:36,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742321_1497 (size=8568) 2024-11-28T00:30:36,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742321_1497 (size=8568) 2024-11-28T00:30:36,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742321_1497 (size=8568) 2024-11-28T00:30:36,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742322_1498 (size=349573) 2024-11-28T00:30:36,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742322_1498 (size=349573) 2024-11-28T00:30:36,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742322_1498 (size=349573) 2024-11-28T00:30:38,461 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:30:38,462 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status 
and integrity. 2024-11-28T00:30:38,471 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:38,471 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:30:38,471 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:30:38,471 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:38,472 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-28T00:30:38,472 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-28T00:30:38,472 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:38,473 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/.snapshotinfo 2024-11-28T00:30:38,473 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841/.hbase-snapshot/emptySnaptb0-testEmptyExportFileSystemState/data.manifest 2024-11-28T00:30:38,484 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=209, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,488 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=209 2024-11-28T00:30:38,489 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753838489"}]},"ts":"1732753838489"} 2024-11-28T00:30:38,491 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLING in hbase:meta 2024-11-28T00:30:38,491 INFO [PEWorker-1 {}] 
procedure.DisableTableProcedure(284): Set testtb-testEmptyExportFileSystemState to state=DISABLING 2024-11-28T00:30:38,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=210, ppid=209, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState}] 2024-11-28T00:30:38,493 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, UNASSIGN}, {pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, UNASSIGN}] 2024-11-28T00:30:38,494 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, UNASSIGN 2024-11-28T00:30:38,494 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, UNASSIGN 2024-11-28T00:30:38,495 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=212 updating hbase:meta row=3a633d932d42bf1603d22a7430fcebad, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:38,496 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=211 updating hbase:meta row=aa92e9cd35a1b1216e0e190631ad4f4b, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:30:38,498 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=212, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, UNASSIGN because future has completed 2024-11-28T00:30:38,499 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:30:38,499 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=213, ppid=212, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3a633d932d42bf1603d22a7430fcebad, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:30:38,501 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=211, ppid=210, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, UNASSIGN because future has completed 2024-11-28T00:30:38,501 DEBUG [PEWorker-5 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:30:38,501 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=214, ppid=211, state=RUNNABLE, hasLock=false; CloseRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:30:38,591 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=209 2024-11-28T00:30:38,652 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(122): Close 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:38,652 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:30:38,652 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1722): Closing 3a633d932d42bf1603d22a7430fcebad, disabling compactions & flushes 2024-11-28T00:30:38,652 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:38,652 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:38,652 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. after waiting 0 ms 2024-11-28T00:30:38,652 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:38,653 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(122): Close aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:38,653 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:30:38,653 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1722): Closing aa92e9cd35a1b1216e0e190631ad4f4b, disabling compactions & flushes 2024-11-28T00:30:38,653 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1755): Closing region testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:38,653 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 2024-11-28T00:30:38,653 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1843): Acquired close lock on testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. after waiting 0 ms 2024-11-28T00:30:38,653 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1853): Updates disabled for region testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 
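Note: the ExportSnapshot entries earlier in this section ("Finalize the Snapshot Export", "Verify the exported snapshot's expiration status and integrity", "Export Completed: emptySnaptb0-testEmptyExportFileSystemState") correspond to a run of the standard ExportSnapshot tool. A minimal sketch of driving that tool programmatically, assuming a Configuration that points at the source cluster and reusing the snapshot name and export root shown in the log (the class name here is illustrative only, not part of the test):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportSnapshotSketch {
      public static void main(String[] args) throws Exception {
        // Assumes hbase-site.xml on the classpath points at the source cluster (hdfs://localhost:43901 above).
        Configuration conf = HBaseConfiguration.create();
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "emptySnaptb0-testEmptyExportFileSystemState",
            "-copy-to",
            "hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753827841"
        });
        if (rc != 0) {
          throw new IllegalStateException("ExportSnapshot exited with code " + rc);
        }
      }
    }

On success the exported layout contains the .snapshotinfo and data.manifest files listed in the TestExportSnapshot DEBUG entries above.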
2024-11-28T00:30:38,660 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:30:38,661 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:30:38,661 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad. 2024-11-28T00:30:38,661 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] regionserver.HRegion(1676): Region close journal for 3a633d932d42bf1603d22a7430fcebad: Waiting for close lock at 1732753838652Running coprocessor pre-close hooks at 1732753838652Disabling compacts and flushes for region at 1732753838652Disabling writes for close at 1732753838652Writing region close event to WAL at 1732753838657 (+5 ms)Running coprocessor post-close hooks at 1732753838661 (+4 ms)Closed at 1732753838661 2024-11-28T00:30:38,663 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=213}] handler.UnassignRegionHandler(157): Closed 3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:38,664 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=212 updating hbase:meta row=3a633d932d42bf1603d22a7430fcebad, regionState=CLOSED 2024-11-28T00:30:38,666 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=213, ppid=212, state=RUNNABLE, hasLock=false; CloseRegionProcedure 3a633d932d42bf1603d22a7430fcebad, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:30:38,668 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:30:38,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:30:38,669 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1973): Closed testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b. 
2024-11-28T00:30:38,669 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] regionserver.HRegion(1676): Region close journal for aa92e9cd35a1b1216e0e190631ad4f4b: Waiting for close lock at 1732753838653Running coprocessor pre-close hooks at 1732753838653Disabling compacts and flushes for region at 1732753838653Disabling writes for close at 1732753838653Writing region close event to WAL at 1732753838660 (+7 ms)Running coprocessor post-close hooks at 1732753838668 (+8 ms)Closed at 1732753838669 (+1 ms) 2024-11-28T00:30:38,674 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=214}] handler.UnassignRegionHandler(157): Closed aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:38,675 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=211 updating hbase:meta row=aa92e9cd35a1b1216e0e190631ad4f4b, regionState=CLOSED 2024-11-28T00:30:38,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=213, resume processing ppid=212 2024-11-28T00:30:38,675 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=213, ppid=212, state=SUCCESS, hasLock=false; CloseRegionProcedure 3a633d932d42bf1603d22a7430fcebad, server=72c8c7f04009,41621,1732753506207 in 168 msec 2024-11-28T00:30:38,676 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=214, ppid=211, state=RUNNABLE, hasLock=false; CloseRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:30:38,677 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=212, ppid=210, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=3a633d932d42bf1603d22a7430fcebad, UNASSIGN in 182 msec 2024-11-28T00:30:38,681 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=214, resume processing ppid=211 2024-11-28T00:30:38,681 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=214, ppid=211, state=SUCCESS, hasLock=false; CloseRegionProcedure aa92e9cd35a1b1216e0e190631ad4f4b, server=72c8c7f04009,43335,1732753506004 in 176 msec 2024-11-28T00:30:38,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=211, resume processing ppid=210 2024-11-28T00:30:38,683 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=211, ppid=210, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testEmptyExportFileSystemState, region=aa92e9cd35a1b1216e0e190631ad4f4b, UNASSIGN in 188 msec 2024-11-28T00:30:38,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=210, resume processing ppid=209 2024-11-28T00:30:38,685 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=210, ppid=209, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testEmptyExportFileSystemState in 191 msec 2024-11-28T00:30:38,686 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753838686"}]},"ts":"1732753838686"} 2024-11-28T00:30:38,688 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testEmptyExportFileSystemState, state=DISABLED in hbase:meta 2024-11-28T00:30:38,688 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(296): Set 
testtb-testEmptyExportFileSystemState to state=DISABLED 2024-11-28T00:30:38,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=209, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testEmptyExportFileSystemState in 205 msec 2024-11-28T00:30:38,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=209 2024-11-28T00:30:38,801 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:30:38,802 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,803 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=215, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,803 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,804 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=215, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,806 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,808 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:38,808 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:38,809 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/recovered.edits] 2024-11-28T00:30:38,809 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/recovered.edits] 2024-11-28T00:30:38,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,810 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,811 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-28T00:30:38,811 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-28T00:30:38,812 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-28T00:30:38,812 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testEmptyExportFileSystemState with data PBUF 2024-11-28T00:30:38,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:38,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:38,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:38,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,813 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:38,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=215 2024-11-28T00:30:38,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:38,814 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/cf/d69f1a38f72444b4beaadba0919342cd to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/cf/d69f1a38f72444b4beaadba0919342cd 2024-11-28T00:30:38,814 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:38,815 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:38,815 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:38,816 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/cf/e71ccb6f738a4575b4f727a7f12a61d3 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/cf/e71ccb6f738a4575b4f727a7f12a61d3 2024-11-28T00:30:38,821 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b/recovered.edits/9.seqid 2024-11-28T00:30:38,821 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(596): Archived from FileablePath, 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad/recovered.edits/9.seqid 2024-11-28T00:30:38,821 DEBUG [HFileArchiver-23 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:38,821 DEBUG [HFileArchiver-18 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testEmptyExportFileSystemState/3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:38,821 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(313): Archived testtb-testEmptyExportFileSystemState regions 2024-11-28T00:30:38,822 DEBUG [PEWorker-4 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-28T00:30:38,823 DEBUG [PEWorker-4 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf] 2024-11-28T00:30:38,826 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241128695af86e56924132810588f5c22423d6_3a633d932d42bf1603d22a7430fcebad to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/c4ca4238a0b923820dcc509a6f75849b20241128695af86e56924132810588f5c22423d6_3a633d932d42bf1603d22a7430fcebad 2024-11-28T00:30:38,827 DEBUG [PEWorker-4 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241128ab67adca96c7401b8f5e8961d29ac3f2_aa92e9cd35a1b1216e0e190631ad4f4b to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0/cf/d41d8cd98f00b204e9800998ecf8427e20241128ab67adca96c7401b8f5e8961d29ac3f2_aa92e9cd35a1b1216e0e190631ad4f4b 2024-11-28T00:30:38,827 DEBUG [PEWorker-4 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testEmptyExportFileSystemState/37e3865d2ff7e5e2da33a03e3f723df0 2024-11-28T00:30:38,830 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=215, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,832 WARN [PEWorker-4 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of 
testtb-testEmptyExportFileSystemState from hbase:meta 2024-11-28T00:30:38,834 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testEmptyExportFileSystemState' descriptor. 2024-11-28T00:30:38,836 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=215, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,836 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testEmptyExportFileSystemState' from region states. 2024-11-28T00:30:38,836 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753838836"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:38,836 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753838836"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:38,838 INFO [PEWorker-4 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:30:38,838 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => aa92e9cd35a1b1216e0e190631ad4f4b, NAME => 'testtb-testEmptyExportFileSystemState,,1732753825642.aa92e9cd35a1b1216e0e190631ad4f4b.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 3a633d932d42bf1603d22a7430fcebad, NAME => 'testtb-testEmptyExportFileSystemState,1,1732753825642.3a633d932d42bf1603d22a7430fcebad.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:30:38,839 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testEmptyExportFileSystemState' as deleted. 
2024-11-28T00:30:38,839 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testEmptyExportFileSystemState","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753838839"}]},"ts":"9223372036854775807"} 2024-11-28T00:30:38,840 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testEmptyExportFileSystemState state from META 2024-11-28T00:30:38,841 DEBUG [PEWorker-4 {}] procedure.DeleteTableProcedure(133): Finished pid=215, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,843 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=215, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testEmptyExportFileSystemState in 39 msec 2024-11-28T00:30:38,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=215 2024-11-28T00:30:38,921 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testEmptyExportFileSystemState 2024-11-28T00:30:38,921 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testEmptyExportFileSystemState completed 2024-11-28T00:30:38,927 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-28T00:30:38,929 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:38,930 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testEmptyExportFileSystemState" type: DISABLED 2024-11-28T00:30:38,931 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testEmptyExportFileSystemState 2024-11-28T00:30:38,957 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testEmptyExportFileSystemState Thread=813 (was 797) Potentially hanging thread: HFileArchiver-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:38561 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42845 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 11651) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:34415 from appattempt_1732753513471_0008_000001 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:34415 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:50076 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:49574 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_457870256_1 at /127.0.0.1:45028 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #14 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:42845 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Thread-7324 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:45074 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=794 (was 771) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=767 (was 778), ProcessCount=17 (was 14) - ProcessCount LEAK? 
-, AvailableMemoryMB=2746 (was 3251) 2024-11-28T00:30:38,957 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-11-28T00:30:38,977 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=813, OpenFileDescriptor=794, MaxFileDescriptor=1048576, SystemLoadAverage=767, ProcessCount=17, AvailableMemoryMB=2745 2024-11-28T00:30:38,977 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=813 is superior to 500 2024-11-28T00:30:38,979 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:30:38,980 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=216, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:30:38,981 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:30:38,981 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportWithChecksum" procId is: 216 2024-11-28T00:30:38,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=216 2024-11-28T00:30:38,982 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:30:38,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742323_1499 (size=440) 2024-11-28T00:30:38,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742323_1499 (size=440) 2024-11-28T00:30:38,995 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742323_1499 (size=440) 2024-11-28T00:30:38,997 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 308b34fbec8bee883c7512d318852c3f, NAME => 'testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', 
BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:38,998 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 6b36346bd216c8d9f6f6904a7bb7bfcf, NAME => 'testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportWithChecksum', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:39,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742324_1500 (size=65) 2024-11-28T00:30:39,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742324_1500 (size=65) 2024-11-28T00:30:39,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742324_1500 (size=65) 2024-11-28T00:30:39,018 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:39,019 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1722): Closing 308b34fbec8bee883c7512d318852c3f, disabling compactions & flushes 2024-11-28T00:30:39,019 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:30:39,019 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:30:39,019 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. after waiting 0 ms 2024-11-28T00:30:39,019 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:30:39,019 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 
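Note: the table descriptor logged for 'testtb-testExportWithChecksum' (IS_MOB => 'true', MOB_THRESHOLD => '0', VERSIONS => '1', and two regions split at row key '1') can be expressed with the standard descriptor builders. A hedged sketch of the equivalent client call, with connection setup assumed as in the earlier snippet:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateMobTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          TableName tn = TableName.valueOf("testtb-testExportWithChecksum");
          TableDescriptor td = TableDescriptorBuilder.newBuilder(tn)
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
                  .setMobEnabled(true)   // IS_MOB => 'true' in the logged descriptor
                  .setMobThreshold(0L)   // MOB_THRESHOLD => '0': every cell value is written as a MOB file
                  .setMaxVersions(1)     // VERSIONS => '1'
                  .build())
              .build();
          byte[][] splitKeys = { Bytes.toBytes("1") };  // yields the ''..'1' and '1'..'' regions created above
          admin.createTable(td, splitKeys);
        }
      }
    }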
2024-11-28T00:30:39,019 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-0 {}] regionserver.HRegion(1676): Region close journal for 308b34fbec8bee883c7512d318852c3f: Waiting for close lock at 1732753839019Disabling compacts and flushes for region at 1732753839019Disabling writes for close at 1732753839019Writing region close event to WAL at 1732753839019Closed at 1732753839019 2024-11-28T00:30:39,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742325_1501 (size=65) 2024-11-28T00:30:39,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742325_1501 (size=65) 2024-11-28T00:30:39,020 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742325_1501 (size=65) 2024-11-28T00:30:39,021 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:39,021 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1722): Closing 6b36346bd216c8d9f6f6904a7bb7bfcf, disabling compactions & flushes 2024-11-28T00:30:39,021 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:30:39,021 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:30:39,021 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. after waiting 0 ms 2024-11-28T00:30:39,021 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:30:39,021 INFO [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 
2024-11-28T00:30:39,021 DEBUG [RegionOpenAndInit-testtb-testExportWithChecksum-pool-1 {}] regionserver.HRegion(1676): Region close journal for 6b36346bd216c8d9f6f6904a7bb7bfcf: Waiting for close lock at 1732753839021Disabling compacts and flushes for region at 1732753839021Disabling writes for close at 1732753839021Writing region close event to WAL at 1732753839021Closed at 1732753839021 2024-11-28T00:30:39,022 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:30:39,023 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732753839022"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753839022"}]},"ts":"1732753839022"} 2024-11-28T00:30:39,023 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.","families":{"info":[{"qualifier":"regioninfo","vlen":64,"tag":[],"timestamp":"1732753839022"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753839022"}]},"ts":"1732753839022"} 2024-11-28T00:30:39,025 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:30:39,026 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:30:39,026 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753839026"}]},"ts":"1732753839026"} 2024-11-28T00:30:39,027 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLING in hbase:meta 2024-11-28T00:30:39,027 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:30:39,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:30:39,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:30:39,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:30:39,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:30:39,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:30:39,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:30:39,029 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:30:39,029 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:30:39,029 INFO [PEWorker-3 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:30:39,029 DEBUG [PEWorker-3 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:30:39,029 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=217, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; 
TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, ASSIGN}, {pid=218, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, ASSIGN}] 2024-11-28T00:30:39,030 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=218, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, ASSIGN 2024-11-28T00:30:39,030 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=217, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, ASSIGN 2024-11-28T00:30:39,031 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=218, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:30:39,031 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=217, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, ASSIGN; state=OFFLINE, location=72c8c7f04009,41621,1732753506207; forceNewPlan=false, retain=false 2024-11-28T00:30:39,091 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=216 2024-11-28T00:30:39,181 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
2024-11-28T00:30:39,182 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=217 updating hbase:meta row=308b34fbec8bee883c7512d318852c3f, regionState=OPENING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:39,182 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=218 updating hbase:meta row=6b36346bd216c8d9f6f6904a7bb7bfcf, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:30:39,185 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=217, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, ASSIGN because future has completed 2024-11-28T00:30:39,185 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=219, ppid=217, state=RUNNABLE, hasLock=false; OpenRegionProcedure 308b34fbec8bee883c7512d318852c3f, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:30:39,186 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=218, ppid=216, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, ASSIGN because future has completed 2024-11-28T00:30:39,187 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=220, ppid=218, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:30:39,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=216 2024-11-28T00:30:39,342 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:30:39,342 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] handler.AssignRegionHandler(132): Open testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:30:39,342 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(7752): Opening region: {ENCODED => 6b36346bd216c8d9f6f6904a7bb7bfcf, NAME => 'testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:30:39,342 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(7752): Opening region: {ENCODED => 308b34fbec8bee883c7512d318852c3f, NAME => 'testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. service=AccessControlService 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 
service=AccessControlService 2024-11-28T00:30:39,343 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:30:39,343 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportWithChecksum 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(898): Instantiated testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(7794): checking encryption for 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(7797): checking classloading for 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(7794): checking encryption for 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,343 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(7797): checking classloading for 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,344 INFO [StoreOpener-308b34fbec8bee883c7512d318852c3f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,344 INFO [StoreOpener-6b36346bd216c8d9f6f6904a7bb7bfcf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,346 INFO [StoreOpener-308b34fbec8bee883c7512d318852c3f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 308b34fbec8bee883c7512d318852c3f columnFamilyName cf 2024-11-28T00:30:39,346 INFO [StoreOpener-6b36346bd216c8d9f6f6904a7bb7bfcf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 6b36346bd216c8d9f6f6904a7bb7bfcf columnFamilyName cf 2024-11-28T00:30:39,347 DEBUG [StoreOpener-6b36346bd216c8d9f6f6904a7bb7bfcf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:39,347 DEBUG [StoreOpener-308b34fbec8bee883c7512d318852c3f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:39,347 INFO [StoreOpener-308b34fbec8bee883c7512d318852c3f-1 {}] regionserver.HStore(327): Store=308b34fbec8bee883c7512d318852c3f/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:39,347 INFO [StoreOpener-6b36346bd216c8d9f6f6904a7bb7bfcf-1 {}] regionserver.HStore(327): Store=6b36346bd216c8d9f6f6904a7bb7bfcf/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:30:39,347 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1038): replaying wal for 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,348 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1038): replaying wal for 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,348 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,348 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,348 DEBUG 
[RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,349 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,349 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1048): stopping wal replay for 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,349 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1060): Cleaning up temporary data for 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,349 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1048): stopping wal replay for 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,349 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1060): Cleaning up temporary data for 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,351 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1093): writing seq id for 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,351 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1093): writing seq id for 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,353 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:30:39,353 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1114): Opened 308b34fbec8bee883c7512d318852c3f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=71724751, jitterRate=0.06878207623958588}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:39,353 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:30:39,353 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,354 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegion(1006): Region open journal for 308b34fbec8bee883c7512d318852c3f: Running coprocessor pre-open hook at 1732753839343Writing region info on filesystem at 1732753839343Initializing all 
the Stores at 1732753839344 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753839344Cleaning up temporary data from old regions at 1732753839349 (+5 ms)Running coprocessor post-open hooks at 1732753839353 (+4 ms)Region opened successfully at 1732753839354 (+1 ms) 2024-11-28T00:30:39,355 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f., pid=219, masterSystemTime=1732753839339 2024-11-28T00:30:39,357 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:30:39,357 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=219}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:30:39,357 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=217 updating hbase:meta row=308b34fbec8bee883c7512d318852c3f, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:30:39,359 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=219, ppid=217, state=RUNNABLE, hasLock=false; OpenRegionProcedure 308b34fbec8bee883c7512d318852c3f, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:30:39,360 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1114): Opened 6b36346bd216c8d9f6f6904a7bb7bfcf; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=60825353, jitterRate=-0.09363161027431488}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:30:39,360 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,360 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegion(1006): Region open journal for 6b36346bd216c8d9f6f6904a7bb7bfcf: Running coprocessor pre-open hook at 1732753839343Writing region info on filesystem at 1732753839343Initializing all the Stores at 1732753839344 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753839344Cleaning up temporary data from old regions at 1732753839349 (+5 ms)Running coprocessor post-open hooks at 1732753839360 (+11 ms)Region opened successfully at 1732753839360 
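The open journals above dump the 'cf' column family attributes the test table carries (VERSIONS => '1', BLOOMFILTER => 'ROW', BLOCKSIZE => 64KB, IS_MOB => 'true', MOB_THRESHOLD => '0') and show the table split into two regions at row key '1'. As a hedged illustration only — the test harness builds its tables through HBaseTestingUtil helpers, not this exact code, and the class name below is purely hypothetical — an equivalent pre-split table could be created through the public client API roughly like this:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateChecksumTestTable {              // hypothetical class name, illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");
      // Column family settings mirroring the attributes printed in the open journal above.
      ColumnFamilyDescriptorBuilder cf = ColumnFamilyDescriptorBuilder
          .newBuilder(Bytes.toBytes("cf"))
          .setMaxVersions(1)                        // VERSIONS => '1'
          .setBloomFilterType(BloomType.ROW)        // BLOOMFILTER => 'ROW'
          .setBlocksize(64 * 1024)                  // BLOCKSIZE => 64KB
          .setMobEnabled(true)                      // IS_MOB => 'true'
          .setMobThreshold(0L);                     // MOB_THRESHOLD => '0'
      // One split key ('1') yields the two regions seen above: [,1) and [1,).
      byte[][] splitKeys = new byte[][] { Bytes.toBytes("1") };
      admin.createTable(TableDescriptorBuilder.newBuilder(table)
          .setColumnFamily(cf.build())
          .build(), splitKeys);
    }
  }
}

Whether the test sets these attributes via a descriptor or via utility defaults is not visible in the log; the sketch only shows how the logged attributes map onto the descriptor builders.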
2024-11-28T00:30:39,361 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf., pid=220, masterSystemTime=1732753839339 2024-11-28T00:30:39,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=219, resume processing ppid=217 2024-11-28T00:30:39,363 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=219, ppid=217, state=SUCCESS, hasLock=false; OpenRegionProcedure 308b34fbec8bee883c7512d318852c3f, server=72c8c7f04009,41621,1732753506207 in 175 msec 2024-11-28T00:30:39,363 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:30:39,363 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=220}] handler.AssignRegionHandler(153): Opened testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:30:39,364 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=217, ppid=216, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, ASSIGN in 334 msec 2024-11-28T00:30:39,364 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=218 updating hbase:meta row=6b36346bd216c8d9f6f6904a7bb7bfcf, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:30:39,366 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=220, ppid=218, state=RUNNABLE, hasLock=false; OpenRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:30:39,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=220, resume processing ppid=218 2024-11-28T00:30:39,369 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=220, ppid=218, state=SUCCESS, hasLock=false; OpenRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf, server=72c8c7f04009,36427,1732753506332 in 180 msec 2024-11-28T00:30:39,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=218, resume processing ppid=216 2024-11-28T00:30:39,370 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=218, ppid=216, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, ASSIGN in 340 msec 2024-11-28T00:30:39,371 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:30:39,371 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753839371"}]},"ts":"1732753839371"} 2024-11-28T00:30:39,382 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=ENABLED in hbase:meta 2024-11-28T00:30:39,383 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=216, 
state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportWithChecksum execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:30:39,383 DEBUG [PEWorker-3 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportWithChecksum jenkins: RWXCA 2024-11-28T00:30:39,387 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-28T00:30:39,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:39,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:39,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:39,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:30:39,394 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=216, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportWithChecksum in 413 msec 2024-11-28T00:30:39,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:39,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:39,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:39,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:39,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:39,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:39,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): 
Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:39,399 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF\x0AC\x0A\x07jenkins\x128\x08\x03"4\x0A(\x0A\x07default\x12\x1Dtesttb-testExportWithChecksum \x00 \x01 \x02 \x03 \x04 2024-11-28T00:30:39,611 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=216 2024-11-28T00:30:39,612 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:30:39,612 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:30:39,615 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-28T00:30:39,615 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:30:39,615 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:30:39,617 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:30:39,622 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:30:39,634 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:30:39,639 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-28T00:30:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753839639 (current time:1732753839639). 
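The CREATE_TABLE_POST_OPERATION records above show the master writing the table creator's permissions (jenkins: RWXCA) into hbase:acl and every master/region server refreshing its permission cache when the /hbase/acl znode children change. As a rough, hypothetical sketch only (the class name is made up, and this is the public client path, not the code the master runs internally), the same kind of table grant can be issued and read back with AccessControlClient:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.access.AccessControlClient;
import org.apache.hadoop.hbase.security.access.Permission;
import org.apache.hadoop.hbase.security.access.UserPermission;

public class GrantChecksumTablePerms {              // hypothetical class name, illustration only
  public static void main(String[] args) throws Throwable {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      TableName table = TableName.valueOf("testtb-testExportWithChecksum");
      // Grant the same RWXCA action set the log records for user 'jenkins'
      // (null family/qualifier means the grant applies to the whole table).
      AccessControlClient.grant(conn, table, "jenkins", null, null,
          Permission.Action.READ, Permission.Action.WRITE, Permission.Action.EXEC,
          Permission.Action.CREATE, Permission.Action.ADMIN);
      // Reading the entries back corresponds to the "Read acl: entry[...]" lines in the trace.
      for (UserPermission p : AccessControlClient.getUserPermissions(conn, table.getNameAsString())) {
        System.out.println(p);
      }
    }
  }
}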
2024-11-28T00:30:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:30:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-28T00:30:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:30:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1b84df60, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:39,639 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:39,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:39,640 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:39,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:39,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:39,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@232acd81, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:39,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:39,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:39,641 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:39,642 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46644, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:39,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1ecc1763, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:39,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:39,643 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 
{}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:39,644 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:39,645 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49480, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:39,645 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 2024-11-28T00:30:39,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:39,645 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:39,646 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:39,646 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:30:39,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@339f6d2d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:39,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:39,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:39,653 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:39,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:39,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:39,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@537e0ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:39,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:39,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:39,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:39,655 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46662, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:39,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@24e6518c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:39,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:39,656 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:39,657 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:39,657 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49484, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:30:39,658 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:30:39,659 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:39,659 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54876, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:39,661 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 2024-11-28T00:30:39,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:39,661 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:39,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:39,661 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-28T00:30:39,662 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:30:39,663 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-28T00:30:39,664 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-28T00:30:39,665 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-28T00:30:39,666 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:30:39,667 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
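At this point the master has stored SnapshotProcedure pid=221 for a FLUSH-type snapshot named emptySnaptb0-testExportWithChecksum and the client is polling "Checking to see if procedure is done pid=221" until it completes. The client-side trigger for this is an Admin snapshot request; a minimal sketch (hypothetical class name, assuming the table already exists and is enabled; the two-argument overload takes a FLUSH snapshot by default):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class TakeEmptySnapshot {                    // hypothetical class name, illustration only
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side SnapshotProcedure (like pid=221 above) finishes.
      admin.snapshot("emptySnaptb0-testExportWithChecksum",
          TableName.valueOf("testtb-testExportWithChecksum"));
      // The completed snapshot should then appear in the listing.
      admin.listSnapshots().forEach(s -> System.out.println(s.getName()));
    }
  }
}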
2024-11-28T00:30:39,667 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:30:39,670 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:30:39,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742326_1502 (size=161) 2024-11-28T00:30:39,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742326_1502 (size=161) 2024-11-28T00:30:39,688 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742326_1502 (size=161) 2024-11-28T00:30:39,690 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:30:39,690 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308b34fbec8bee883c7512d318852c3f}, {pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf}] 2024-11-28T00:30:39,691 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,692 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,771 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-28T00:30:39,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=223 2024-11-28T00:30:39,844 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=222 2024-11-28T00:30:39,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:30:39,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 
2024-11-28T00:30:39,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.HRegion(2603): Flush status journal for 6b36346bd216c8d9f6f6904a7bb7bfcf: 2024-11-28T00:30:39,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.HRegion(2603): Flush status journal for 308b34fbec8bee883c7512d318852c3f: 2024-11-28T00:30:39,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. for emptySnaptb0-testExportWithChecksum completed. 2024-11-28T00:30:39,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. for emptySnaptb0-testExportWithChecksum completed. 2024-11-28T00:30:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-28T00:30:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.' region-info for snapshot=emptySnaptb0-testExportWithChecksum 2024-11-28T00:30:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:30:39,845 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:30:39,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742327_1503 (size=68) 2024-11-28T00:30:39,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742327_1503 (size=68) 2024-11-28T00:30:39,876 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742327_1503 (size=68) 2024-11-28T00:30:39,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 
2024-11-28T00:30:39,877 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=222}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=222 2024-11-28T00:30:39,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=222 2024-11-28T00:30:39,878 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,878 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=222, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:39,881 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=222, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 308b34fbec8bee883c7512d318852c3f in 189 msec 2024-11-28T00:30:39,889 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742328_1504 (size=68) 2024-11-28T00:30:39,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742328_1504 (size=68) 2024-11-28T00:30:39,890 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742328_1504 (size=68) 2024-11-28T00:30:39,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 
2024-11-28T00:30:39,897 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=223}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=223 2024-11-28T00:30:39,902 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=223 2024-11-28T00:30:39,902 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportWithChecksum on region 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,902 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=223, ppid=221, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:39,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=223, resume processing ppid=221 2024-11-28T00:30:39,906 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=223, ppid=221, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf in 213 msec 2024-11-28T00:30:39,906 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:30:39,907 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:30:39,908 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:30:39,908 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:30:39,908 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:39,909 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:30:39,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742329_1505 (size=60) 2024-11-28T00:30:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742329_1505 (size=60) 2024-11-28T00:30:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742329_1505 (size=60) 2024-11-28T00:30:39,928 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:30:39,928 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportWithChecksum 2024-11-28T00:30:39,929 DEBUG [PEWorker-1 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum 2024-11-28T00:30:39,951 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742330_1506 (size=641) 2024-11-28T00:30:39,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742330_1506 (size=641) 2024-11-28T00:30:39,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742330_1506 (size=641) 2024-11-28T00:30:39,961 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:30:39,966 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:30:39,966 DEBUG [PEWorker-1 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportWithChecksum to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testExportWithChecksum 2024-11-28T00:30:39,968 INFO [PEWorker-1 {}] procedure.SnapshotProcedure(134): pid=221, state=RUNNABLE:SNAPSHOT_POST_OPERATION, 
hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:30:39,968 DEBUG [PEWorker-1 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 221 2024-11-28T00:30:39,970 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=221, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=221, snapshot={ ss=emptySnaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 306 msec 2024-11-28T00:30:39,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=221 2024-11-28T00:30:39,981 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:30:39,991 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41621 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:30:39,993 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegion(8528): writing data to region testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:30:39,996 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:30:40,000 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportWithChecksum 2024-11-28T00:30:40,000 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 
2024-11-28T00:30:40,000 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:30:40,002 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:30:40,007 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:30:40,013 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportWithChecksum,, stopping at row=testtb-testExportWithChecksum ,, for max=2147483647 with caching=100 2024-11-28T00:30:40,016 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-28T00:30:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753840016 (current time:1732753840016). 2024-11-28T00:30:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:30:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportWithChecksum VERSION not specified, setting to 2 2024-11-28T00:30:40,016 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:30:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b67666f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:40,017 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:40,018 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:40,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:40,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:40,018 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15f0229f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 
2024-11-28T00:30:40,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:40,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:40,019 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:40,020 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46676, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:40,020 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3e7e8129, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:40,021 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:40,021 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:40,022 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:40,023 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49496, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:40,024 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 
2024-11-28T00:30:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:40,024 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:40,024 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:30:40,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1f6345b0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:40,025 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:30:40,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:30:40,026 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:30:40,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:30:40,026 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:30:40,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6ce9f9f8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:40,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:30:40,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:30:40,027 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:40,028 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46692, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:30:40,028 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e6d1eb8, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:30:40,029 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:30:40,030 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:30:40,031 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:40,032 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49508, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:40,033 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportWithChecksum', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:30:40,034 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:30:40,034 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54880, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:30:40,036 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 
2024-11-28T00:30:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:30:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:30:40,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportWithChecksum], kv [jenkins: RWXCA] 2024-11-28T00:30:40,037 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:30:40,038 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:30:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=224, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } 2024-11-28T00:30:40,038 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 224 2024-11-28T00:30:40,039 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:30:40,039 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-11-28T00:30:40,040 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:30:40,042 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:30:40,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742331_1507 (size=156) 2024-11-28T00:30:40,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742331_1507 (size=156) 2024-11-28T00:30:40,051 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742331_1507 (size=156) 2024-11-28T00:30:40,052 INFO [PEWorker-5 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:30:40,052 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308b34fbec8bee883c7512d318852c3f}, {pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf}] 2024-11-28T00:30:40,053 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:40,053 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:40,141 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 
{}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-11-28T00:30:40,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=226 2024-11-28T00:30:40,205 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41621 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=225 2024-11-28T00:30:40,205 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:30:40,205 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:30:40,205 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegion(2902): Flushing 308b34fbec8bee883c7512d318852c3f 1/1 column families, dataSize=132 B heapSize=544 B 2024-11-28T00:30:40,205 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegion(2902): Flushing 6b36346bd216c8d9f6f6904a7bb7bfcf 1/1 column families, dataSize=3.13 KB heapSize=7 KB 2024-11-28T00:30:40,229 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f is 71, key is 037c6d02cbbefe744e0902f33e2d167e/cf:q/1732753839991/Put/seqid=0 2024-11-28T00:30:40,235 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf is 71, key is 162410e7324579e6353abc495a3f1760/cf:q/1732753839993/Put/seqid=0 2024-11-28T00:30:40,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742332_1508 (size=5032) 2024-11-28T00:30:40,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742332_1508 (size=5032) 2024-11-28T00:30:40,236 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742332_1508 (size=5032) 2024-11-28T00:30:40,236 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:40,242 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HMobStore(268): FLUSH Renaming flushed file from 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:40,243 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/.tmp/cf/2589baf60cd54238a2e5d90fedd81569, store: [table=testtb-testExportWithChecksum family=cf region=308b34fbec8bee883c7512d318852c3f] 2024-11-28T00:30:40,244 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/.tmp/cf/2589baf60cd54238a2e5d90fedd81569 is 206, key is 0a8b6c33c75cfb09185798130b84a9a2a/cf:q/1732753839991/Put/seqid=0 2024-11-28T00:30:40,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742333_1509 (size=8242) 2024-11-28T00:30:40,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742333_1509 (size=8242) 2024-11-28T00:30:40,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742333_1509 (size=8242) 2024-11-28T00:30:40,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742334_1510 (size=5700) 2024-11-28T00:30:40,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742334_1510 (size=5700) 2024-11-28T00:30:40,254 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742334_1510 (size=5700) 2024-11-28T00:30:40,351 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-11-28T00:30:40,646 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:40,650 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:40,650 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/.tmp/cf/1288ff7b32c049be928fa529e3f90e07, store: [table=testtb-testExportWithChecksum family=cf region=6b36346bd216c8d9f6f6904a7bb7bfcf] 2024-11-28T00:30:40,651 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/.tmp/cf/1288ff7b32c049be928fa529e3f90e07 is 206, key is 10db4dbd3c6c59e4b5eaf465f54ba33a6/cf:q/1732753839993/Put/seqid=0 2024-11-28T00:30:40,652 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=132, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/.tmp/cf/2589baf60cd54238a2e5d90fedd81569 2024-11-28T00:30:40,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742335_1511 (size=15057) 2024-11-28T00:30:40,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742335_1511 (size=15057) 2024-11-28T00:30:40,657 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742335_1511 (size=15057) 2024-11-28T00:30:40,657 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/.tmp/cf/2589baf60cd54238a2e5d90fedd81569 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569 2024-11-28T00:30:40,657 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/.tmp/cf/1288ff7b32c049be928fa529e3f90e07 2024-11-28T00:30:40,660 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-11-28T00:30:40,661 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/.tmp/cf/1288ff7b32c049be928fa529e3f90e07 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07 2024-11-28T00:30:40,662 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569, entries=2, sequenceid=6, filesize=5.6 K 2024-11-28T00:30:40,663 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegion(3140): Finished flush of dataSize ~132 B/132, heapSize ~528 B/528, currentSize=0 B/0 for 308b34fbec8bee883c7512d318852c3f in 458ms, sequenceid=6, compaction requested=false 2024-11-28T00:30:40,663 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportWithChecksum' 2024-11-28T00:30:40,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.HRegion(2603): Flush status journal for 308b34fbec8bee883c7512d318852c3f: 2024-11-28T00:30:40,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. for snaptb0-testExportWithChecksum completed. 2024-11-28T00:30:40,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-28T00:30:40,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:40,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569] hfiles 2024-11-28T00:30:40,664 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569 for snapshot=snaptb0-testExportWithChecksum 2024-11-28T00:30:40,665 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07, entries=48, sequenceid=6, filesize=14.7 K 2024-11-28T00:30:40,666 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegion(3140): Finished flush of dataSize ~3.13 KB/3204, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 6b36346bd216c8d9f6f6904a7bb7bfcf in 461ms, sequenceid=6, compaction requested=false 2024-11-28T00:30:40,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.HRegion(2603): Flush status journal for 6b36346bd216c8d9f6f6904a7bb7bfcf: 2024-11-28T00:30:40,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. for snaptb0-testExportWithChecksum completed. 2024-11-28T00:30:40,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.' 
region-info for snapshot=snaptb0-testExportWithChecksum 2024-11-28T00:30:40,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:30:40,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07] hfiles 2024-11-28T00:30:40,666 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07 for snapshot=snaptb0-testExportWithChecksum 2024-11-28T00:30:40,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742336_1512 (size=107) 2024-11-28T00:30:40,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742336_1512 (size=107) 2024-11-28T00:30:40,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742336_1512 (size=107) 2024-11-28T00:30:40,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 
2024-11-28T00:30:40,675 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=225}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=225 2024-11-28T00:30:40,676 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=225 2024-11-28T00:30:40,676 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:40,676 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=225, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:40,678 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=225, ppid=224, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 308b34fbec8bee883c7512d318852c3f in 625 msec 2024-11-28T00:30:40,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742337_1513 (size=107) 2024-11-28T00:30:40,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742337_1513 (size=107) 2024-11-28T00:30:40,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742337_1513 (size=107) 2024-11-28T00:30:40,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 
2024-11-28T00:30:40,687 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=226}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=226 2024-11-28T00:30:40,688 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=226 2024-11-28T00:30:40,688 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportWithChecksum on region 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:40,688 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=226, ppid=224, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:40,690 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=226, resume processing ppid=224 2024-11-28T00:30:40,690 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=226, ppid=224, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf in 637 msec 2024-11-28T00:30:40,690 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:30:40,691 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:30:40,692 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:30:40,692 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:30:40,692 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:30:40,693 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f] hfiles 2024-11-28T00:30:40,693 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:30:40,693 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f 2024-11-28T00:30:40,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742338_1514 (size=291) 2024-11-28T00:30:40,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742338_1514 (size=291) 2024-11-28T00:30:40,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742338_1514 (size=291) 2024-11-28T00:30:40,701 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:30:40,701 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportWithChecksum 2024-11-28T00:30:40,702 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:30:40,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742339_1515 (size=951) 2024-11-28T00:30:40,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742339_1515 (size=951) 2024-11-28T00:30:40,709 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742339_1515 (size=951) 2024-11-28T00:30:40,712 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:30:40,716 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:30:40,716 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-28T00:30:40,718 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=224, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:30:40,718 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 }, snapshot procedure id = 224 2024-11-28T00:30:40,719 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=224, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=224, snapshot={ ss=snaptb0-testExportWithChecksum table=testtb-testExportWithChecksum type=FLUSH ttl=0 } in 681 msec 2024-11-28T00:30:41,171 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=224 2024-11-28T00:30:41,171 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:30:41,172 INFO [Time-limited test {}] snapshot.TestExportSnapshot(523): Local export destination path: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171 2024-11-28T00:30:41,172 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=file:///, tgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171, rawTgtDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:30:41,208 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 
2024-11-28T00:30:41,208 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=org.apache.hadoop.fs.LocalFileSystem@7d54afd5, outputRoot=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171, skipTmp=false, initialOutputSnapshotDir=file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:30:41,209 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:30:41,214 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithChecksum to file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:30:41,236 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:41,237 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:41,237 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-12284041442276023986.jar 2024-11-28T00:30:42,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,233 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-8700565673386221129.jar 2024-11-28T00:30:42,302 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,303 DEBUG 
[Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,303 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,304 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:30:42,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:30:42,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:30:42,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:30:42,305 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:30:42,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:30:42,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:30:42,306 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:30:42,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:30:42,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:30:42,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:30:42,307 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:30:42,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:30:42,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:30:42,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:30:42,308 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:30:42,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:30:42,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:30:42,309 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:30:42,363 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742340_1516 (size=131440) 2024-11-28T00:30:42,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742340_1516 (size=131440) 2024-11-28T00:30:42,363 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742340_1516 (size=131440) 2024-11-28T00:30:42,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742341_1517 (size=4188619) 2024-11-28T00:30:42,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742341_1517 (size=4188619) 2024-11-28T00:30:42,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742341_1517 (size=4188619) 2024-11-28T00:30:42,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742342_1518 (size=1323991) 2024-11-28T00:30:42,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742342_1518 (size=1323991) 2024-11-28T00:30:42,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742342_1518 (size=1323991) 2024-11-28T00:30:42,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742343_1519 (size=903927) 2024-11-28T00:30:42,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742343_1519 (size=903927) 2024-11-28T00:30:42,444 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742343_1519 (size=903927) 2024-11-28T00:30:42,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742344_1520 (size=8360083) 2024-11-28T00:30:42,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742344_1520 (size=8360083) 2024-11-28T00:30:42,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742344_1520 (size=8360083) 2024-11-28T00:30:42,486 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742345_1521 (size=1877034) 2024-11-28T00:30:42,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742345_1521 (size=1877034) 2024-11-28T00:30:42,487 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742345_1521 (size=1877034) 2024-11-28T00:30:42,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742346_1522 (size=77835) 2024-11-28T00:30:42,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742346_1522 (size=77835) 2024-11-28T00:30:42,495 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742346_1522 (size=77835) 2024-11-28T00:30:42,509 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742347_1523 (size=30949) 2024-11-28T00:30:42,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742347_1523 (size=30949) 2024-11-28T00:30:42,510 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742347_1523 (size=30949) 2024-11-28T00:30:42,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742348_1524 (size=1597213) 2024-11-28T00:30:42,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742348_1524 (size=1597213) 2024-11-28T00:30:42,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742348_1524 (size=1597213) 2024-11-28T00:30:42,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742349_1525 (size=4695811) 2024-11-28T00:30:42,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742349_1525 (size=4695811) 2024-11-28T00:30:42,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742349_1525 (size=4695811) 2024-11-28T00:30:42,733 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0008_000001 (auth:SIMPLE) from 127.0.0.1:56778 2024-11-28T00:30:42,747 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0008/container_1732753513471_0008_01_000001/launch_container.sh] 2024-11-28T00:30:42,747 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0008/container_1732753513471_0008_01_000001/container_tokens] 2024-11-28T00:30:42,747 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0008/container_1732753513471_0008_01_000001/sysfs] 2024-11-28T00:30:42,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742350_1526 (size=232957) 2024-11-28T00:30:42,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742350_1526 (size=232957) 
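The mapreduce.TableMapReduceUtil(972) entries earlier in this run record the job's dependency-jar resolution: for each class the export job needs, the containing jar is located and shipped with the job. Below is a minimal Java sketch of how a driver would typically trigger that resolution; it is illustrative only, and the class name and job name are hypothetical rather than taken from this test.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "snapshot-export-sketch"); // hypothetical job name
    // Locates the jar containing each class the job depends on (mapper, input/output
    // formats, HBase client classes, shaded third-party libraries, ...) and adds those
    // jars to the job's distributed cache; this is what emits the
    // "For class X, using jar Y" DEBUG lines seen above.
    TableMapReduceUtil.addDependencyJars(job);
  }
}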
2024-11-28T00:30:42,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742350_1526 (size=232957) 2024-11-28T00:30:42,974 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742351_1527 (size=127628) 2024-11-28T00:30:42,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742351_1527 (size=127628) 2024-11-28T00:30:42,975 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742351_1527 (size=127628) 2024-11-28T00:30:42,990 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742352_1528 (size=20406) 2024-11-28T00:30:42,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742352_1528 (size=20406) 2024-11-28T00:30:42,991 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742352_1528 (size=20406) 2024-11-28T00:30:43,451 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742353_1529 (size=5175431) 2024-11-28T00:30:43,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742353_1529 (size=5175431) 2024-11-28T00:30:43,452 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742353_1529 (size=5175431) 2024-11-28T00:30:43,461 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742354_1530 (size=217634) 2024-11-28T00:30:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742354_1530 (size=217634) 2024-11-28T00:30:43,462 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742354_1530 (size=217634) 2024-11-28T00:30:43,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742355_1531 (size=1832290) 2024-11-28T00:30:43,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742355_1531 (size=1832290) 2024-11-28T00:30:43,503 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742355_1531 (size=1832290) 2024-11-28T00:30:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742356_1532 (size=322274) 2024-11-28T00:30:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742356_1532 (size=322274) 2024-11-28T00:30:43,528 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742356_1532 (size=322274) 2024-11-28T00:30:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742357_1533 
(size=6424743) 2024-11-28T00:30:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742357_1533 (size=6424743) 2024-11-28T00:30:43,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742357_1533 (size=6424743) 2024-11-28T00:30:43,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742358_1534 (size=503880) 2024-11-28T00:30:43,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742358_1534 (size=503880) 2024-11-28T00:30:43,618 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742358_1534 (size=503880) 2024-11-28T00:30:43,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742359_1535 (size=29229) 2024-11-28T00:30:43,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742359_1535 (size=29229) 2024-11-28T00:30:43,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742359_1535 (size=29229) 2024-11-28T00:30:43,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742360_1536 (size=24096) 2024-11-28T00:30:43,696 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742360_1536 (size=24096) 2024-11-28T00:30:43,697 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742360_1536 (size=24096) 2024-11-28T00:30:43,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742361_1537 (size=111872) 2024-11-28T00:30:43,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742361_1537 (size=111872) 2024-11-28T00:30:43,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742361_1537 (size=111872) 2024-11-28T00:30:43,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742362_1538 (size=440957) 2024-11-28T00:30:43,771 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742362_1538 (size=440957) 2024-11-28T00:30:43,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742362_1538 (size=440957) 2024-11-28T00:30:43,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742363_1539 (size=45609) 2024-11-28T00:30:43,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742363_1539 (size=45609) 2024-11-28T00:30:43,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742363_1539 
(size=45609) 2024-11-28T00:30:43,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742364_1540 (size=136454) 2024-11-28T00:30:43,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742364_1540 (size=136454) 2024-11-28T00:30:43,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742364_1540 (size=136454) 2024-11-28T00:30:43,832 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:30:43,834 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-28T00:30:43,836 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-11-28T00:30:43,837 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:30:43,837 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-11-28T00:30:43,837 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-11-28T00:30:43,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742365_1541 (size=1023) 2024-11-28T00:30:43,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742365_1541 (size=1023) 2024-11-28T00:30:43,904 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742365_1541 (size=1023) 2024-11-28T00:30:43,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742366_1542 (size=35) 2024-11-28T00:30:43,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742366_1542 (size=35) 2024-11-28T00:30:43,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742366_1542 (size=35) 2024-11-28T00:30:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742367_1543 (size=304042) 2024-11-28T00:30:44,040 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742367_1543 (size=304042) 2024-11-28T00:30:44,041 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742367_1543 (size=304042) 2024-11-28T00:30:44,075 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:30:44,075 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:30:44,193 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:30:44,229 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:56220 2024-11-28T00:30:45,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-28T00:30:45,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum Metrics about Tables on a single HBase RegionServer 2024-11-28T00:30:45,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testEmptyExportFileSystemState 2024-11-28T00:30:51,044 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:30:52,296 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:54414 2024-11-28T00:30:52,746 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742368_1544 (size=349740) 2024-11-28T00:30:52,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742368_1544 (size=349740) 2024-11-28T00:30:52,747 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742368_1544 (size=349740) 2024-11-28T00:30:54,568 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:40390 2024-11-28T00:30:54,568 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:42118 2024-11-28T00:30:55,419 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:40394 2024-11-28T00:30:55,424 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:42126 2024-11-28T00:30:57,733 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0009_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:30:58,581 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 76fb4953fe949fdbfff19f1d73203841, had cached 0 bytes from a total of 14663 2024-11-28T00:30:58,581 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3790779e6552562e94070478b4a3d9dc, had cached 0 bytes from a total of 5890 2024-11-28T00:31:02,027 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000003/launch_container.sh] 2024-11-28T00:31:02,027 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000003/container_tokens] 2024-11-28T00:31:02,027 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000003/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
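This checksum-mismatch error (repeated for several files below) is raised by ExportSnapshot when it verifies a file copied from HDFS to the local filesystem and the two filesystems' checksums cannot be compared. The following is a minimal, hypothetical driver showing the two remedies the message itself names; the snapshot name is taken from the log, but the destination path and the choice of flags are illustrative and are not what this test run does.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Remedy 1 (from the error message): request a block-size-independent,
    // file-level CRC so checksums can be compared across different filesystems.
    conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportWithChecksum",
        "-copy-to", "file:///tmp/local-export"   // hypothetical destination
        // Remedy 2 (alternative, also from the message): pass "-no-checksum-verify"
        // here to skip the post-copy checksum comparison entirely.
    });
    System.exit(rc);
  }
}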
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:03,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:38790 2024-11-28T00:31:03,956 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:04,832 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 91195ca5c70c54af0f84245ffc432b72 1/1 column families, dataSize=1.47 KB heapSize=3.49 KB 2024-11-28T00:31:04,870 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/.tmp/l/f169a57ebf564c97bd956e680a279ac8 is 74, key is testtb-testExportFileSystemStateWithMergeRegion-1/l:/1732753811255/DeleteFamily/seqid=0 2024-11-28T00:31:04,898 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000004/launch_container.sh] 2024-11-28T00:31:04,898 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000004/container_tokens] 2024-11-28T00:31:04,898 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000004/sysfs] 2024-11-28T00:31:04,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742370_1546 (size=5791) 2024-11-28T00:31:04,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742370_1546 (size=5791) 2024-11-28T00:31:04,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742370_1546 (size=5791) 2024-11-28T00:31:04,900 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.47 KB at sequenceid=28 (bloomFilter=false), 
to=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/.tmp/l/f169a57ebf564c97bd956e680a279ac8 2024-11-28T00:31:04,907 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f169a57ebf564c97bd956e680a279ac8 2024-11-28T00:31:04,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/.tmp/l/f169a57ebf564c97bd956e680a279ac8 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/l/f169a57ebf564c97bd956e680a279ac8 2024-11-28T00:31:04,943 INFO [MemStoreFlusher.0 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f169a57ebf564c97bd956e680a279ac8 2024-11-28T00:31:04,943 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/l/f169a57ebf564c97bd956e680a279ac8, entries=13, sequenceid=28, filesize=5.7 K 2024-11-28T00:31:04,944 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.47 KB/1504, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 91195ca5c70c54af0f84245ffc432b72 in 112ms, sequenceid=28, compaction requested=false 2024-11-28T00:31:04,944 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 91195ca5c70c54af0f84245ffc432b72: 2024-11-28T00:31:05,091 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000002/launch_container.sh] 2024-11-28T00:31:05,091 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000002/container_tokens] 2024-11-28T00:31:05,091 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000002/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569. Input and output filesystems are of different types. 
Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:05,496 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:38794 2024-11-28T00:31:06,503 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:48454 2024-11-28T00:31:06,509 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:38796 2024-11-28T00:31:08,931 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0009_01_000011 while processing FINISH_CONTAINERS event 2024-11-28T00:31:09,068 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0009_01_000012 while processing FINISH_CONTAINERS event 2024-11-28T00:31:09,093 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000005/launch_container.sh] 2024-11-28T00:31:09,093 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000005/container_tokens] 2024-11-28T00:31:09,093 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000005/sysfs] 2024-11-28T00:31:09,957 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 91195ca5c70c54af0f84245ffc432b72, had cached 0 bytes from a total of 5791 Error: java.io.IOException: Checksum mismatch between 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:12,528 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:38800 2024-11-28T00:31:12,639 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000008/launch_container.sh] 2024-11-28T00:31:12,639 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000008/container_tokens] 2024-11-28T00:31:12,639 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000008/sysfs] 2024-11-28T00:31:12,920 DEBUG 
[master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 76fb4953fe949fdbfff19f1d73203841 changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:31:12,921 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 6b36346bd216c8d9f6f6904a7bb7bfcf changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:31:12,921 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 308b34fbec8bee883c7512d318852c3f changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:31:12,921 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 3790779e6552562e94070478b4a3d9dc changed from -1.0 to 0.0, refreshing cache Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:13,535 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:45620 2024-11-28T00:31:13,962 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000009/launch_container.sh] 2024-11-28T00:31:13,962 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000009/container_tokens] 2024-11-28T00:31:13,962 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000009/sysfs] 2024-11-28T00:31:14,190 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000010/launch_container.sh] 2024-11-28T00:31:14,190 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000010/container_tokens] 2024-11-28T00:31:14,190 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000010/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:14,539 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:45626 2024-11-28T00:31:14,900 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0009_01_000015 while processing FINISH_CONTAINERS event 2024-11-28T00:31:15,544 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:45740 2024-11-28T00:31:15,921 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000007/launch_container.sh] 2024-11-28T00:31:15,921 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000007/container_tokens] 2024-11-28T00:31:15,921 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000007/sysfs] 2024-11-28T00:31:17,166 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0009_01_000018 while processing FINISH_CONTAINERS event 2024-11-28T00:31:17,903 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0009_01_000019 while processing FINISH_CONTAINERS event 2024-11-28T00:31:17,955 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000013/launch_container.sh] 2024-11-28T00:31:17,955 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000013/container_tokens] 2024-11-28T00:31:17,955 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000013/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) 
at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:19,455 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000014/launch_container.sh] 2024-11-28T00:31:19,455 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000014/container_tokens] 2024-11-28T00:31:19,455 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000014/sysfs] 2024-11-28T00:31:19,562 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:45640 2024-11-28T00:31:20,567 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:45756 Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. 
(NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:21,577 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:45646 2024-11-28T00:31:21,751 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0009_01_000022 while processing FINISH_CONTAINERS event 2024-11-28T00:31:22,319 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000017/launch_container.sh] 2024-11-28T00:31:22,319 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000017/container_tokens] 2024-11-28T00:31:22,319 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000017/sysfs] Error: java.io.IOException: Checksum mismatch between hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07 and file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/local-export-1732753841171/archive/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07. Input and output filesystems are of different types. Their checksum algorithms may be incompatible. You can choose file-level checksum validation via -Ddfs.checksum.combine.mode=COMPOSITE_CRC when block-sizes or filesystems are different. 
Or you can skip checksum-checks altogether with -no-checksum-verify, for the table backup scenario, you should use -i option to skip checksum-checks. (NOTE: By skipping checksums, one runs the risk of masking data-corruption during file-transfer.) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.verifyCopyResult(ExportSnapshot.java:601) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.copyFile(ExportSnapshot.java:337) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:259) at org.apache.hadoop.hbase.snapshot.ExportSnapshot$ExportMapper.map(ExportSnapshot.java:183) at org.apache.hadoop.mapreduce.Mapper.run(Mapper.java:145) at org.apache.hadoop.mapred.MapTask.runNewMapper(MapTask.java:800) at org.apache.hadoop.mapred.MapTask.run(MapTask.java:348) at org.apache.hadoop.mapred.YarnChild$2.run(YarnChild.java:178) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.mapred.YarnChild.main(YarnChild.java:172) 2024-11-28T00:31:22,589 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:45660 2024-11-28T00:31:24,344 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 308b34fbec8bee883c7512d318852c3f, had cached 0 bytes from a total of 5700 2024-11-28T00:31:24,344 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 6b36346bd216c8d9f6f6904a7bb7bfcf, had cached 0 bytes from a total of 15057 2024-11-28T00:31:24,752 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0009_01_000025 while processing FINISH_CONTAINERS event 2024-11-28T00:31:25,895 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000016/launch_container.sh] 2024-11-28T00:31:25,895 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000016/container_tokens] 2024-11-28T00:31:25,895 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000016/sysfs] 2024-11-28T00:31:26,815 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:39948 2024-11-28T00:31:26,815 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for 
appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:56162 2024-11-28T00:31:26,816 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:39956 2024-11-28T00:31:26,933 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732753513471_0009_01_000021 is : 143 2024-11-28T00:31:26,972 WARN [ContainersLauncher #5 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732753513471_0009_01_000023 is : 143 2024-11-28T00:31:27,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742369_1545 (size=49086) 2024-11-28T00:31:27,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742369_1545 (size=49086) 2024-11-28T00:31:27,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742369_1545 (size=49086) 2024-11-28T00:31:27,038 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000021/launch_container.sh] 2024-11-28T00:31:27,038 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000021/container_tokens] 2024-11-28T00:31:27,038 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_3/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000021/sysfs] 2024-11-28T00:31:27,039 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000023/launch_container.sh] 2024-11-28T00:31:27,039 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000023/container_tokens] 2024-11-28T00:31:27,039 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000023/sysfs] 2024-11-28T00:31:27,043 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732753513471_0009_01_000024 is : 143 2024-11-28T00:31:27,074 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000024/launch_container.sh] 2024-11-28T00:31:27,074 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000024/container_tokens] 2024-11-28T00:31:27,074 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000024/sysfs] 2024-11-28T00:31:27,081 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000020/launch_container.sh] 2024-11-28T00:31:27,081 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000020/container_tokens] 2024-11-28T00:31:27,081 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000020/sysfs] 2024-11-28T00:31:27,089 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742371_1547 (size=461) 2024-11-28T00:31:27,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742371_1547 (size=461) 2024-11-28T00:31:27,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742371_1547 (size=461) 2024-11-28T00:31:27,139 INFO [Block report processor 
{}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742372_1548 (size=49086) 2024-11-28T00:31:27,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742372_1548 (size=49086) 2024-11-28T00:31:27,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742372_1548 (size=49086) 2024-11-28T00:31:27,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742373_1549 (size=349740) 2024-11-28T00:31:27,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742373_1549 (size=349740) 2024-11-28T00:31:27,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742373_1549 (size=349740) 2024-11-28T00:31:27,180 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:39972 2024-11-28T00:31:28,438 ERROR [Time-limited test {}] snapshot.ExportSnapshot(1239): Snapshot export failed org.apache.hadoop.hbase.snapshot.ExportSnapshotException: Task failed task_1732753513471_0009_m_000001 Job failed as tasks failed. failedMaps:1 failedReduces:0 killedMaps:0 killedReduces: 0 at org.apache.hadoop.hbase.snapshot.ExportSnapshot.runCopyJob(ExportSnapshot.java:947) ~[classes/:?] at org.apache.hadoop.hbase.snapshot.ExportSnapshot.doWork(ExportSnapshot.java:1216) ~[classes/:?] at org.apache.hadoop.hbase.util.AbstractHBaseTool.run(AbstractHBaseTool.java:150) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.util.ToolRunner.run(ToolRunner.java:82) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.runExportSnapshot(TestExportSnapshot.java:570) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportFileSystemState(TestExportSnapshot.java:400) ~[test-classes/:?] at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.testExportWithChecksum(TestExportSnapshot.java:285) ~[test-classes/:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) ~[junit-4.13.2.jar:4.13.2] at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) ~[junit-4.13.2.jar:4.13.2] at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) ~[junit-4.13.2.jar:4.13.2] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
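[editor's note] The checksum-mismatch failures above occur because the snapshot is exported from an hdfs:// source to a file:// destination, and the error text itself names the two workarounds: file-level checksum comparison via -Ddfs.checksum.combine.mode=COMPOSITE_CRC, or skipping verification with -no-checksum-verify. The sketch below is a hedged illustration only, not part of this log or of the test code: it shows one plausible way to re-run the export programmatically with the composite-CRC option, using the snapshot name that appears later in this log; the destination URI and the class name ExportWithCompositeCrc are placeholders.

    // Hedged sketch (assumptions noted above), not taken from the log or test sources.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
    import org.apache.hadoop.util.ToolRunner;

    public class ExportWithCompositeCrc {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // File-level composite-CRC checksums remain comparable when the source and
        // destination filesystems (or their block sizes) differ, as suggested above.
        conf.set("dfs.checksum.combine.mode", "COMPOSITE_CRC");
        int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
            "-snapshot", "snaptb0-testExportWithChecksum",  // snapshot name seen in this log
            "-copy-to", "file:///tmp/local-export"          // placeholder destination URI
            // Alternative named in the error message: "-no-checksum-verify"
            // (skips verification and risks masking corruption during transfer).
        });
        System.exit(rc);
      }
    }

[end editor's note]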
2024-11-28T00:31:28,439 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439 2024-11-28T00:31:28,439 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:31:28,468 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:31:28,468 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439, skipTmp=false, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:31:28,470 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:31:28,475 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithChecksum to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439/.hbase-snapshot/.tmp/snaptb0-testExportWithChecksum 2024-11-28T00:31:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742375_1551 (size=951) 2024-11-28T00:31:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742374_1550 (size=156) 2024-11-28T00:31:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742375_1551 (size=951) 2024-11-28T00:31:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742374_1550 (size=156) 2024-11-28T00:31:28,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742374_1550 (size=156) 2024-11-28T00:31:28,490 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742375_1551 (size=951) 2024-11-28T00:31:28,491 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:28,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:28,492 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,499 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-15215199474881861444.jar 2024-11-28T00:31:29,499 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,499 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-16391144974006732069.jar 2024-11-28T00:31:29,565 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:29,566 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:31:29,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:31:29,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:31:29,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:31:29,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:31:29,567 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:31:29,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:31:29,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:31:29,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:31:29,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:31:29,568 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:31:29,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 
2024-11-28T00:31:29,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:31:29,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:31:29,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:31:29,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:31:29,569 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:31:29,570 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:31:29,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742376_1552 (size=131440) 2024-11-28T00:31:29,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742376_1552 (size=131440) 2024-11-28T00:31:29,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742376_1552 (size=131440) 2024-11-28T00:31:29,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742377_1553 (size=4188619) 2024-11-28T00:31:29,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742377_1553 (size=4188619) 2024-11-28T00:31:29,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742377_1553 (size=4188619) 2024-11-28T00:31:29,647 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742378_1554 (size=1323991) 2024-11-28T00:31:29,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742378_1554 (size=1323991) 2024-11-28T00:31:29,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742378_1554 (size=1323991) 2024-11-28T00:31:29,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:45909 is added to blk_1073742379_1555 (size=903927) 2024-11-28T00:31:29,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742379_1555 (size=903927) 2024-11-28T00:31:29,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742379_1555 (size=903927) 2024-11-28T00:31:29,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742380_1556 (size=8360083) 2024-11-28T00:31:29,683 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742380_1556 (size=8360083) 2024-11-28T00:31:29,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742380_1556 (size=8360083) 2024-11-28T00:31:29,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742381_1557 (size=1877034) 2024-11-28T00:31:29,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742381_1557 (size=1877034) 2024-11-28T00:31:29,694 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742381_1557 (size=1877034) 2024-11-28T00:31:29,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742382_1558 (size=77835) 2024-11-28T00:31:29,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742382_1558 (size=77835) 2024-11-28T00:31:29,701 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742382_1558 (size=77835) 2024-11-28T00:31:29,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742383_1559 (size=30949) 2024-11-28T00:31:29,706 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742383_1559 (size=30949) 2024-11-28T00:31:29,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742383_1559 (size=30949) 2024-11-28T00:31:29,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742384_1560 (size=1597213) 2024-11-28T00:31:29,717 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742384_1560 (size=1597213) 2024-11-28T00:31:29,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742384_1560 (size=1597213) 2024-11-28T00:31:29,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742385_1561 (size=4695811) 2024-11-28T00:31:29,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742385_1561 (size=4695811) 2024-11-28T00:31:29,743 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742385_1561 (size=4695811) 2024-11-28T00:31:29,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742386_1562 (size=232957) 2024-11-28T00:31:29,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742386_1562 (size=232957) 2024-11-28T00:31:29,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742386_1562 (size=232957) 2024-11-28T00:31:29,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742387_1563 (size=127628) 2024-11-28T00:31:29,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742387_1563 (size=127628) 2024-11-28T00:31:29,756 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742387_1563 (size=127628) 2024-11-28T00:31:29,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742388_1564 (size=6424743) 2024-11-28T00:31:29,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742388_1564 (size=6424743) 2024-11-28T00:31:29,783 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742388_1564 (size=6424743) 2024-11-28T00:31:29,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742389_1565 (size=20406) 2024-11-28T00:31:29,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742389_1565 (size=20406) 2024-11-28T00:31:29,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742389_1565 (size=20406) 2024-11-28T00:31:29,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742390_1566 (size=5175431) 2024-11-28T00:31:29,811 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742390_1566 (size=5175431) 2024-11-28T00:31:29,812 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742390_1566 (size=5175431) 2024-11-28T00:31:29,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742391_1567 (size=217634) 2024-11-28T00:31:29,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742391_1567 (size=217634) 2024-11-28T00:31:29,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742391_1567 (size=217634) 2024-11-28T00:31:29,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742392_1568 (size=440957) 2024-11-28T00:31:29,825 INFO [Block report 
processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742392_1568 (size=440957) 2024-11-28T00:31:29,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742392_1568 (size=440957) 2024-11-28T00:31:29,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742393_1569 (size=1832290) 2024-11-28T00:31:29,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742393_1569 (size=1832290) 2024-11-28T00:31:29,836 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742393_1569 (size=1832290) 2024-11-28T00:31:29,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742394_1570 (size=322274) 2024-11-28T00:31:29,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742394_1570 (size=322274) 2024-11-28T00:31:29,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742394_1570 (size=322274) 2024-11-28T00:31:29,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742395_1571 (size=503880) 2024-11-28T00:31:29,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742395_1571 (size=503880) 2024-11-28T00:31:29,856 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742395_1571 (size=503880) 2024-11-28T00:31:29,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742396_1572 (size=29229) 2024-11-28T00:31:29,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742396_1572 (size=29229) 2024-11-28T00:31:29,862 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742396_1572 (size=29229) 2024-11-28T00:31:29,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742397_1573 (size=24096) 2024-11-28T00:31:29,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742397_1573 (size=24096) 2024-11-28T00:31:29,868 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742397_1573 (size=24096) 2024-11-28T00:31:29,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742398_1574 (size=111872) 2024-11-28T00:31:29,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742398_1574 (size=111872) 2024-11-28T00:31:29,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742398_1574 (size=111872) 2024-11-28T00:31:29,881 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742399_1575 (size=45609) 2024-11-28T00:31:29,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742399_1575 (size=45609) 2024-11-28T00:31:29,884 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742399_1575 (size=45609) 2024-11-28T00:31:29,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742400_1576 (size=136454) 2024-11-28T00:31:29,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742400_1576 (size=136454) 2024-11-28T00:31:29,899 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742400_1576 (size=136454) 2024-11-28T00:31:29,900 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 2024-11-28T00:31:29,902 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportWithChecksum' hfile list 2024-11-28T00:31:29,905 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=14.7 K 2024-11-28T00:31:29,905 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:31:29,905 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.6 K 2024-11-28T00:31:29,905 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=4.9 K 2024-11-28T00:31:29,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742401_1577 (size=1023) 2024-11-28T00:31:29,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742401_1577 (size=1023) 2024-11-28T00:31:29,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742401_1577 (size=1023) 2024-11-28T00:31:29,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742402_1578 (size=35) 2024-11-28T00:31:29,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742402_1578 (size=35) 2024-11-28T00:31:29,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742402_1578 (size=35) 2024-11-28T00:31:29,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742403_1579 (size=303996) 2024-11-28T00:31:29,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742403_1579 (size=303996) 2024-11-28T00:31:29,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742403_1579 (size=303996) 2024-11-28T00:31:33,297 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in 
queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:31:33,297 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:31:33,302 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0009_000001 (auth:SIMPLE) from 127.0.0.1:49132 2024-11-28T00:31:33,957 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:31:34,054 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:55542 2024-11-28T00:31:38,420 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000001/launch_container.sh] 2024-11-28T00:31:38,420 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000001/container_tokens] 2024-11-28T00:31:38,420 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_1/usercache/jenkins/appcache/application_1732753513471_0009/container_1732753513471_0009_01_000001/sysfs] 2024-11-28T00:31:39,239 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:44168 2024-11-28T00:31:39,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742404_1580 (size=349694) 2024-11-28T00:31:39,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742404_1580 (size=349694) 2024-11-28T00:31:39,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742404_1580 (size=349694) 2024-11-28T00:31:41,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:49134 2024-11-28T00:31:41,471 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:55546 2024-11-28T00:31:42,328 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:55562 2024-11-28T00:31:42,341 INFO 
[Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:49148 2024-11-28T00:31:43,582 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 76fb4953fe949fdbfff19f1d73203841, had cached 0 bytes from a total of 14663 2024-11-28T00:31:43,582 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3790779e6552562e94070478b4a3d9dc, had cached 0 bytes from a total of 5890 2024-11-28T00:31:45,296 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0010_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:31:48,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742405_1581 (size=8242) 2024-11-28T00:31:48,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742405_1581 (size=8242) 2024-11-28T00:31:48,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742405_1581 (size=8242) 2024-11-28T00:31:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742407_1583 (size=15057) 2024-11-28T00:31:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742407_1583 (size=15057) 2024-11-28T00:31:49,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742407_1583 (size=15057) 2024-11-28T00:31:50,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742408_1584 (size=5032) 2024-11-28T00:31:50,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742408_1584 (size=5032) 2024-11-28T00:31:50,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742408_1584 (size=5032) 2024-11-28T00:31:50,235 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000005/launch_container.sh] 2024-11-28T00:31:50,235 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000005/container_tokens] 2024-11-28T00:31:50,235 WARN [ContainersLauncher #4 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000005/sysfs] 2024-11-28T00:31:50,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742409_1585 (size=5700) 2024-11-28T00:31:50,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742409_1585 (size=5700) 2024-11-28T00:31:50,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742409_1585 (size=5700) 2024-11-28T00:31:50,425 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000004/launch_container.sh] 2024-11-28T00:31:50,425 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000004/container_tokens] 2024-11-28T00:31:50,425 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000004/sysfs] 2024-11-28T00:31:50,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742406_1582 (size=31740) 2024-11-28T00:31:50,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742406_1582 (size=31740) 2024-11-28T00:31:50,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742406_1582 (size=31740) 2024-11-28T00:31:50,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742410_1586 (size=463) 2024-11-28T00:31:50,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742410_1586 (size=463) 2024-11-28T00:31:50,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742410_1586 (size=463) 2024-11-28T00:31:50,556 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000002/launch_container.sh] 
2024-11-28T00:31:50,557 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000002/container_tokens] 2024-11-28T00:31:50,557 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000002/sysfs] 2024-11-28T00:31:50,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742411_1587 (size=31740) 2024-11-28T00:31:50,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742411_1587 (size=31740) 2024-11-28T00:31:50,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742411_1587 (size=31740) 2024-11-28T00:31:50,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742412_1588 (size=349694) 2024-11-28T00:31:50,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742412_1588 (size=349694) 2024-11-28T00:31:50,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742412_1588 (size=349694) 2024-11-28T00:31:50,619 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:44348 2024-11-28T00:31:50,636 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:44358 2024-11-28T00:31:52,129 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:31:52,131 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
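[Annotation, not part of the captured log.] The entries above show the ExportSnapshot job for snaptb0-testExportWithChecksum being finalized and verified; its completion message follows immediately below. As a rough sketch only — not taken from this run — the same tool can be driven through ToolRunner; the destination URI and mapper count here are illustrative placeholders.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
  import org.apache.hadoop.util.ToolRunner;

  public class ExportSnapshotSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      // Copy a snapshot's manifest and the store files it references to another
      // filesystem; ExportSnapshot submits a MapReduce job to do the copy.
      int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
          "-snapshot", "snaptb0-testExportWithChecksum",
          "-copy-to", "hdfs://backup-cluster:8020/hbase",  // illustrative destination
          "-mappers", "2"                                  // illustrative parallelism
      });
      System.exit(rc);
    }
  }
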
2024-11-28T00:31:52,146 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportWithChecksum 2024-11-28T00:31:52,146 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:31:52,147 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:31:52,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-28T00:31:52,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-28T00:31:52,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-28T00:31:52,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439/.hbase-snapshot/snaptb0-testExportWithChecksum at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439/.hbase-snapshot/snaptb0-testExportWithChecksum 2024-11-28T00:31:52,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439/.hbase-snapshot/snaptb0-testExportWithChecksum/.snapshotinfo 2024-11-28T00:31:52,147 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753888439/.hbase-snapshot/snaptb0-testExportWithChecksum/data.manifest 2024-11-28T00:31:52,153 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportWithChecksum 2024-11-28T00:31:52,155 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=227, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:31:52,157 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-11-28T00:31:52,158 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753912157"}]},"ts":"1732753912157"} 2024-11-28T00:31:52,160 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLING in hbase:meta 2024-11-28T00:31:52,160 INFO [PEWorker-5 {}] procedure.DisableTableProcedure(284): Set testtb-testExportWithChecksum to state=DISABLING 2024-11-28T00:31:52,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): 
Initialized subprocedures=[{pid=228, ppid=227, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum}] 2024-11-28T00:31:52,162 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, UNASSIGN}, {pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, UNASSIGN}] 2024-11-28T00:31:52,163 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, UNASSIGN 2024-11-28T00:31:52,163 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, UNASSIGN 2024-11-28T00:31:52,164 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=6b36346bd216c8d9f6f6904a7bb7bfcf, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:31:52,167 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=308b34fbec8bee883c7512d318852c3f, regionState=CLOSING, regionLocation=72c8c7f04009,41621,1732753506207 2024-11-28T00:31:52,168 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=230, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, UNASSIGN because future has completed 2024-11-28T00:31:52,170 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:31:52,170 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:31:52,171 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=229, ppid=228, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, UNASSIGN because future has completed 2024-11-28T00:31:52,172 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:31:52,172 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 308b34fbec8bee883c7512d318852c3f, server=72c8c7f04009,41621,1732753506207}] 2024-11-28T00:31:52,261 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-11-28T00:31:52,324 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] 
handler.UnassignRegionHandler(122): Close 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:31:52,324 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:31:52,324 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1722): Closing 6b36346bd216c8d9f6f6904a7bb7bfcf, disabling compactions & flushes 2024-11-28T00:31:52,324 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:31:52,324 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:31:52,324 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. after waiting 0 ms 2024-11-28T00:31:52,324 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:31:52,327 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(122): Close 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:31:52,327 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:31:52,327 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1722): Closing 308b34fbec8bee883c7512d318852c3f, disabling compactions & flushes 2024-11-28T00:31:52,327 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1755): Closing region testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:31:52,327 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 2024-11-28T00:31:52,327 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. after waiting 0 ms 2024-11-28T00:31:52,327 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 
2024-11-28T00:31:52,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:31:52,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:31:52,329 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf. 2024-11-28T00:31:52,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] regionserver.HRegion(1676): Region close journal for 6b36346bd216c8d9f6f6904a7bb7bfcf: Waiting for close lock at 1732753912324Running coprocessor pre-close hooks at 1732753912324Disabling compacts and flushes for region at 1732753912324Disabling writes for close at 1732753912324Writing region close event to WAL at 1732753912325 (+1 ms)Running coprocessor post-close hooks at 1732753912329 (+4 ms)Closed at 1732753912329 2024-11-28T00:31:52,331 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:31:52,331 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=231}] handler.UnassignRegionHandler(157): Closed 6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:31:52,331 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:31:52,331 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1973): Closed testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f. 
2024-11-28T00:31:52,331 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] regionserver.HRegion(1676): Region close journal for 308b34fbec8bee883c7512d318852c3f: Waiting for close lock at 1732753912327Running coprocessor pre-close hooks at 1732753912327Disabling compacts and flushes for region at 1732753912327Disabling writes for close at 1732753912327Writing region close event to WAL at 1732753912328 (+1 ms)Running coprocessor post-close hooks at 1732753912331 (+3 ms)Closed at 1732753912331 2024-11-28T00:31:52,332 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=230 updating hbase:meta row=6b36346bd216c8d9f6f6904a7bb7bfcf, regionState=CLOSED 2024-11-28T00:31:52,333 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=232}] handler.UnassignRegionHandler(157): Closed 308b34fbec8bee883c7512d318852c3f 2024-11-28T00:31:52,333 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=229 updating hbase:meta row=308b34fbec8bee883c7512d318852c3f, regionState=CLOSED 2024-11-28T00:31:52,333 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=231, ppid=230, state=RUNNABLE, hasLock=false; CloseRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:31:52,335 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=232, ppid=229, state=RUNNABLE, hasLock=false; CloseRegionProcedure 308b34fbec8bee883c7512d318852c3f, server=72c8c7f04009,41621,1732753506207 because future has completed 2024-11-28T00:31:52,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=231, resume processing ppid=230 2024-11-28T00:31:52,336 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=231, ppid=230, state=SUCCESS, hasLock=false; CloseRegionProcedure 6b36346bd216c8d9f6f6904a7bb7bfcf, server=72c8c7f04009,36427,1732753506332 in 164 msec 2024-11-28T00:31:52,337 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=230, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=6b36346bd216c8d9f6f6904a7bb7bfcf, UNASSIGN in 174 msec 2024-11-28T00:31:52,337 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=232, resume processing ppid=229 2024-11-28T00:31:52,337 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=232, ppid=229, state=SUCCESS, hasLock=false; CloseRegionProcedure 308b34fbec8bee883c7512d318852c3f, server=72c8c7f04009,41621,1732753506207 in 164 msec 2024-11-28T00:31:52,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=229, resume processing ppid=228 2024-11-28T00:31:52,339 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=229, ppid=228, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportWithChecksum, region=308b34fbec8bee883c7512d318852c3f, UNASSIGN in 175 msec 2024-11-28T00:31:52,341 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=228, resume processing ppid=227 2024-11-28T00:31:52,341 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=228, ppid=227, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportWithChecksum in 179 msec 2024-11-28T00:31:52,342 DEBUG [PEWorker-2 {}] 
hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753912342"}]},"ts":"1732753912342"} 2024-11-28T00:31:52,343 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportWithChecksum, state=DISABLED in hbase:meta 2024-11-28T00:31:52,343 INFO [PEWorker-2 {}] procedure.DisableTableProcedure(296): Set testtb-testExportWithChecksum to state=DISABLED 2024-11-28T00:31:52,345 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=227, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportWithChecksum in 190 msec 2024-11-28T00:31:52,471 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=227 2024-11-28T00:31:52,471 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:31:52,471 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportWithChecksum 2024-11-28T00:31:52,472 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:31:52,473 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=233, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:31:52,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportWithChecksum 2024-11-28T00:31:52,474 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=233, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:31:52,477 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f 2024-11-28T00:31:52,477 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:31:52,479 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/recovered.edits] 2024-11-28T00:31:52,479 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf, FileablePath, 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/recovered.edits] 2024-11-28T00:31:52,480 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportWithChecksum 2024-11-28T00:31:52,482 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/cf/1288ff7b32c049be928fa529e3f90e07 2024-11-28T00:31:52,482 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/cf/2589baf60cd54238a2e5d90fedd81569 2024-11-28T00:31:52,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:31:52,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:31:52,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:31:52,483 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:31:52,484 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-28T00:31:52,484 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-28T00:31:52,484 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-28T00:31:52,484 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportWithChecksum with data PBUF 2024-11-28T00:31:52,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:31:52,485 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:31:52,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:31:52,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:31:52,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:31:52,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportWithChecksum 2024-11-28T00:31:52,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:31:52,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:31:52,486 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-11-28T00:31:52,486 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:52,486 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:52,486 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:52,487 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:52,487 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/recovered.edits/9.seqid to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf/recovered.edits/9.seqid 2024-11-28T00:31:52,487 DEBUG [HFileArchiver-25 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:31:52,488 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f/recovered.edits/9.seqid 2024-11-28T00:31:52,488 DEBUG [HFileArchiver-24 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportWithChecksum/308b34fbec8bee883c7512d318852c3f 2024-11-28T00:31:52,488 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(313): Archived testtb-testExportWithChecksum regions 2024-11-28T00:31:52,489 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-28T00:31:52,489 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf] 2024-11-28T00:31:52,492 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/c4ca4238a0b923820dcc509a6f75849b20241128a3ddafab93274458a377b4d7a81c124e_6b36346bd216c8d9f6f6904a7bb7bfcf 2024-11-28T00:31:52,493 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6/cf/d41d8cd98f00b204e9800998ecf8427e202411287f7b09d440004a3f96fde8629c21361c_308b34fbec8bee883c7512d318852c3f 2024-11-28T00:31:52,493 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportWithChecksum/079394da860334b7f5313f35a50f5bc6 2024-11-28T00:31:52,495 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=233, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; 
DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:31:52,497 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportWithChecksum from hbase:meta 2024-11-28T00:31:52,499 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportWithChecksum' descriptor. 2024-11-28T00:31:52,500 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=233, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:31:52,500 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportWithChecksum' from region states. 2024-11-28T00:31:52,500 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753912500"}]},"ts":"9223372036854775807"} 2024-11-28T00:31:52,500 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753912500"}]},"ts":"9223372036854775807"} 2024-11-28T00:31:52,502 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:31:52,502 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => 308b34fbec8bee883c7512d318852c3f, NAME => 'testtb-testExportWithChecksum,,1732753838978.308b34fbec8bee883c7512d318852c3f.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 6b36346bd216c8d9f6f6904a7bb7bfcf, NAME => 'testtb-testExportWithChecksum,1,1732753838978.6b36346bd216c8d9f6f6904a7bb7bfcf.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:31:52,502 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportWithChecksum' as deleted. 
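[Annotation, not part of the captured log.] At this point DisableTableProcedure (pid=227) has finished and DeleteTableProcedure (pid=233) has archived the region and MOB directories and is about to clear the table from hbase:meta; the snapshot deletions follow shortly after. As a rough equivalent under assumed connection setup, the same teardown can be issued from the client API with the names seen in this run:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hbase.HBaseConfiguration;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.Connection;
  import org.apache.hadoop.hbase.client.ConnectionFactory;

  public class DropExportTestTable {
    public static void main(String[] args) throws Exception {
      Configuration conf = HBaseConfiguration.create();
      try (Connection conn = ConnectionFactory.createConnection(conf);
           Admin admin = conn.getAdmin()) {
        TableName table = TableName.valueOf("testtb-testExportWithChecksum");
        if (admin.tableExists(table)) {
          admin.disableTable(table); // DisableTableProcedure: close regions, mark DISABLED in hbase:meta
          admin.deleteTable(table);  // DeleteTableProcedure: archive store files, remove META rows
        }
        // Snapshots are removed separately from the table itself.
        admin.deleteSnapshot("emptySnaptb0-testExportWithChecksum");
        admin.deleteSnapshot("snaptb0-testExportWithChecksum");
      }
    }
  }
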
2024-11-28T00:31:52,502 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportWithChecksum","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753912502"}]},"ts":"9223372036854775807"} 2024-11-28T00:31:52,504 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportWithChecksum state from META 2024-11-28T00:31:52,504 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=233, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportWithChecksum 2024-11-28T00:31:52,505 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=233, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportWithChecksum in 33 msec 2024-11-28T00:31:52,591 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=233 2024-11-28T00:31:52,591 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportWithChecksum 2024-11-28T00:31:52,591 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportWithChecksum completed 2024-11-28T00:31:52,596 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportWithChecksum" type: DISABLED 2024-11-28T00:31:52,598 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportWithChecksum 2024-11-28T00:31:52,599 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportWithChecksum" type: DISABLED 2024-11-28T00:31:52,600 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportWithChecksum 2024-11-28T00:31:52,620 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportWithChecksum Thread=805 (was 813), OpenFileDescriptor=788 (was 794), MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=1034 (was 767) - SystemLoadAverage LEAK? -, ProcessCount=21 (was 17) - ProcessCount LEAK? 
-, AvailableMemoryMB=2340 (was 2745) 2024-11-28T00:31:52,620 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-28T00:31:52,637 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=805, OpenFileDescriptor=788, MaxFileDescriptor=1048576, SystemLoadAverage=1034, ProcessCount=21, AvailableMemoryMB=2340 2024-11-28T00:31:52,637 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=805 is superior to 500 2024-11-28T00:31:52,639 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-28T00:31:52,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:52,641 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_PRE_OPERATION 2024-11-28T00:31:52,641 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "testtb-testExportFileSystemStateWithSkipTmp" procId is: 234 2024-11-28T00:31:52,641 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-28T00:31:52,641 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-11-28T00:31:52,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742413_1589 (size=454) 2024-11-28T00:31:52,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742413_1589 (size=454) 2024-11-28T00:31:52,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742413_1589 (size=454) 2024-11-28T00:31:52,652 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => ae9a96d161250eea1514d4fe40999a5a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.', STARTKEY => '', ENDKEY => '1'}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', 
KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:31:52,653 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(7572): creating {ENCODED => 8ba3ca265e3afad3d2e97b99b7b3f70c, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.', STARTKEY => '1', ENDKEY => ''}, tableDescriptor='testtb-testExportFileSystemStateWithSkipTmp', {TABLE_ATTRIBUTES => {REGION_REPLICATION => '1', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:31:52,662 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742415_1591 (size=79) 2024-11-28T00:31:52,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742415_1591 (size=79) 2024-11-28T00:31:52,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742415_1591 (size=79) 2024-11-28T00:31:52,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742414_1590 (size=79) 2024-11-28T00:31:52,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742414_1590 (size=79) 2024-11-28T00:31:52,663 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742414_1590 (size=79) 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1722): Closing 8ba3ca265e3afad3d2e97b99b7b3f70c, disabling compactions & flushes 2024-11-28T00:31:52,664 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 
2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1722): Closing ae9a96d161250eea1514d4fe40999a5a, disabling compactions & flushes 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. after waiting 0 ms 2024-11-28T00:31:52,664 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:52,664 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. after waiting 0 ms 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-1 {}] regionserver.HRegion(1676): Region close journal for 8ba3ca265e3afad3d2e97b99b7b3f70c: Waiting for close lock at 1732753912664Disabling compacts and flushes for region at 1732753912664Disabling writes for close at 1732753912664Writing region close event to WAL at 1732753912664Closed at 1732753912664 2024-11-28T00:31:52,664 INFO [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 
2024-11-28T00:31:52,664 DEBUG [RegionOpenAndInit-testtb-testExportFileSystemStateWithSkipTmp-pool-0 {}] regionserver.HRegion(1676): Region close journal for ae9a96d161250eea1514d4fe40999a5a: Waiting for close lock at 1732753912664Disabling compacts and flushes for region at 1732753912664Disabling writes for close at 1732753912664Writing region close event to WAL at 1732753912664Closed at 1732753912664 2024-11-28T00:31:52,665 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ADD_TO_META 2024-11-28T00:31:52,665 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732753912665"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753912665"}]},"ts":"1732753912665"} 2024-11-28T00:31:52,665 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.","families":{"info":[{"qualifier":"regioninfo","vlen":78,"tag":[],"timestamp":"1732753912665"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732753912665"}]},"ts":"1732753912665"} 2024-11-28T00:31:52,668 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 2 regions to meta. 2024-11-28T00:31:52,668 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-28T00:31:52,669 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753912668"}]},"ts":"1732753912668"} 2024-11-28T00:31:52,670 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLING in hbase:meta 2024-11-28T00:31:52,670 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(204): Hosts are {72c8c7f04009=0} racks are {/default-rack=0} 2024-11-28T00:31:52,671 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 0 has 0 regions 2024-11-28T00:31:52,671 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 1 has 0 regions 2024-11-28T00:31:52,671 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(303): server 2 has 0 regions 2024-11-28T00:31:52,671 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 0 is on host 0 2024-11-28T00:31:52,671 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 1 is on host 0 2024-11-28T00:31:52,671 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(310): server 2 is on host 0 2024-11-28T00:31:52,671 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 0 is on rack 0 2024-11-28T00:31:52,671 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 1 is on rack 0 2024-11-28T00:31:52,671 INFO [PEWorker-5 {}] balancer.BalancerClusterState(321): server 2 is on rack 0 2024-11-28T00:31:52,671 DEBUG [PEWorker-5 {}] balancer.BalancerClusterState(326): Number of tables=1, number of hosts=1, number of racks=1 2024-11-28T00:31:52,671 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=235, 
ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, ASSIGN}, {pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, ASSIGN}] 2024-11-28T00:31:52,672 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, ASSIGN 2024-11-28T00:31:52,672 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, ASSIGN 2024-11-28T00:31:52,673 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, ASSIGN; state=OFFLINE, location=72c8c7f04009,43335,1732753506004; forceNewPlan=false, retain=false 2024-11-28T00:31:52,673 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, ASSIGN; state=OFFLINE, location=72c8c7f04009,36427,1732753506332; forceNewPlan=false, retain=false 2024-11-28T00:31:52,751 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-11-28T00:31:52,824 INFO [72c8c7f04009:39673 {}] balancer.BaseLoadBalancer(388): Reassigned 2 regions. 2 retained the pre-restart assignment. 
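[Annotation, not part of the captured log.] The master has added the two regions of testtb-testExportFileSystemStateWithSkipTmp to hbase:meta and the balancer has chosen their target servers; the OpenRegionProcedures follow below. For reference only, a table with the same shape — one MOB-enabled family 'cf' with MOB_THRESHOLD 0 and a single split at '1' — could be declared through the client API roughly as follows; the Admin handle is assumed to come from an already-open Connection.

  import java.io.IOException;
  import org.apache.hadoop.hbase.TableName;
  import org.apache.hadoop.hbase.client.Admin;
  import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
  import org.apache.hadoop.hbase.client.TableDescriptor;
  import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
  import org.apache.hadoop.hbase.util.Bytes;

  public class CreateMobTableSketch {
    static void createTable(Admin admin) throws IOException {
      TableDescriptor td = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("cf"))
              .setMobEnabled(true)  // IS_MOB => 'true'
              .setMobThreshold(0L)  // MOB_THRESHOLD => '0': every value is written as a MOB file
              .setMaxVersions(1)    // VERSIONS => '1'
              .build())
          .build();
      // One split key "1" produces the two regions logged above: ('', '1') and ('1', '').
      admin.createTable(td, new byte[][] { Bytes.toBytes("1") });
    }
  }
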
2024-11-28T00:31:52,824 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=ae9a96d161250eea1514d4fe40999a5a, regionState=OPENING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:31:52,824 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=8ba3ca265e3afad3d2e97b99b7b3f70c, regionState=OPENING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:31:52,826 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=235, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, ASSIGN because future has completed 2024-11-28T00:31:52,826 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=237, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae9a96d161250eea1514d4fe40999a5a, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:31:52,827 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=236, ppid=234, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, ASSIGN because future has completed 2024-11-28T00:31:52,827 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=238, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c, server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:31:52,961 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-11-28T00:31:52,982 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:52,982 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7752): Opening region: {ENCODED => ae9a96d161250eea1514d4fe40999a5a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.', STARTKEY => '', ENDKEY => '1'} 2024-11-28T00:31:52,982 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(132): Open testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:31:52,982 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7752): Opening region: {ENCODED => 8ba3ca265e3afad3d2e97b99b7b3f70c, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.', STARTKEY => '1', ENDKEY => ''} 2024-11-28T00:31:52,982 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 
service=AccessControlService 2024-11-28T00:31:52,983 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(8280): Registered coprocessor service: region=testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. service=AccessControlService 2024-11-28T00:31:52,983 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] coprocessor.CoprocessorHost(174): System coprocessor org.apache.hadoop.hbase.security.access.AccessController loaded, priority=536870911. 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7794): checking encryption for ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table testtb-testExportFileSystemStateWithSkipTmp 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(7797): checking classloading for ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(898): Instantiated testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7794): checking encryption for 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,983 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(7797): checking classloading for 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,984 INFO [StoreOpener-ae9a96d161250eea1514d4fe40999a5a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,984 INFO [StoreOpener-8ba3ca265e3afad3d2e97b99b7b3f70c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, 
cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family cf of region 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,985 INFO [StoreOpener-8ba3ca265e3afad3d2e97b99b7b3f70c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8ba3ca265e3afad3d2e97b99b7b3f70c columnFamilyName cf 2024-11-28T00:31:52,985 INFO [StoreOpener-ae9a96d161250eea1514d4fe40999a5a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region ae9a96d161250eea1514d4fe40999a5a columnFamilyName cf 2024-11-28T00:31:52,986 DEBUG [StoreOpener-8ba3ca265e3afad3d2e97b99b7b3f70c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:31:52,986 DEBUG [StoreOpener-ae9a96d161250eea1514d4fe40999a5a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:31:52,987 INFO [StoreOpener-8ba3ca265e3afad3d2e97b99b7b3f70c-1 {}] regionserver.HStore(327): Store=8ba3ca265e3afad3d2e97b99b7b3f70c/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:31:52,987 INFO [StoreOpener-ae9a96d161250eea1514d4fe40999a5a-1 {}] regionserver.HStore(327): Store=ae9a96d161250eea1514d4fe40999a5a/cf, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-28T00:31:52,987 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1038): replaying wal for 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,987 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1038): replaying wal for ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,987 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,988 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,988 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,988 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,988 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1048): stopping wal replay for ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,988 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1060): Cleaning up temporary data for ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,988 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1048): stopping wal replay for 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,988 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1060): Cleaning up temporary data for 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,990 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1093): writing seq id for ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,990 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1093): writing seq id for 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,991 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:31:52,991 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-28T00:31:52,991 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1114): Opened 8ba3ca265e3afad3d2e97b99b7b3f70c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=72691318, 
jitterRate=0.08318504691123962}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:31:52,991 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:52,991 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1114): Opened ae9a96d161250eea1514d4fe40999a5a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=66298515, jitterRate=-0.012075141072273254}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-28T00:31:52,992 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1122): Running coprocessor post-open hooks for ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:52,992 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegion(1006): Region open journal for 8ba3ca265e3afad3d2e97b99b7b3f70c: Running coprocessor pre-open hook at 1732753912983Writing region info on filesystem at 1732753912983Initializing all the Stores at 1732753912984 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753912984Cleaning up temporary data from old regions at 1732753912988 (+4 ms)Running coprocessor post-open hooks at 1732753912991 (+3 ms)Region opened successfully at 1732753912992 (+1 ms) 2024-11-28T00:31:52,992 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegion(1006): Region open journal for ae9a96d161250eea1514d4fe40999a5a: Running coprocessor pre-open hook at 1732753912983Writing region info on filesystem at 1732753912983Initializing all the Stores at 1732753912984 (+1 ms)Instantiating store for column family {NAME => 'cf', INDEX_BLOCK_ENCODING => 'NONE', MOB_THRESHOLD => '0', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', IS_MOB => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732753912984Cleaning up temporary data from old regions at 1732753912988 (+4 ms)Running coprocessor post-open hooks at 1732753912992 (+4 ms)Region opened successfully at 1732753912992 2024-11-28T00:31:52,993 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a., pid=237, masterSystemTime=1732753912979 2024-11-28T00:31:52,993 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2236): Post open deploy tasks for testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c., pid=238, masterSystemTime=1732753912980 2024-11-28T00:31:52,994 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, 
pid=237}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:52,994 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=237}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:52,995 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=235 updating hbase:meta row=ae9a96d161250eea1514d4fe40999a5a, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:31:52,995 DEBUG [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] regionserver.HRegionServer(2266): Finished post open deploy task for testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:31:52,995 INFO [RS_OPEN_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_OPEN_REGION, pid=238}] handler.AssignRegionHandler(153): Opened testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:31:52,996 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=236 updating hbase:meta row=8ba3ca265e3afad3d2e97b99b7b3f70c, regionState=OPEN, openSeqNum=2, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:31:52,997 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=237, ppid=235, state=RUNNABLE, hasLock=false; OpenRegionProcedure ae9a96d161250eea1514d4fe40999a5a, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:31:52,998 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=238, ppid=236, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:31:53,006 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=237, resume processing ppid=235 2024-11-28T00:31:53,006 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=237, ppid=235, state=SUCCESS, hasLock=false; OpenRegionProcedure ae9a96d161250eea1514d4fe40999a5a, server=72c8c7f04009,43335,1732753506004 in 172 msec 2024-11-28T00:31:53,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=238, resume processing ppid=236 2024-11-28T00:31:53,007 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=238, ppid=236, state=SUCCESS, hasLock=false; OpenRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c, server=72c8c7f04009,36427,1732753506332 in 179 msec 2024-11-28T00:31:53,008 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=235, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, ASSIGN in 335 msec 2024-11-28T00:31:53,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=236, resume processing ppid=234 2024-11-28T00:31:53,008 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=236, ppid=234, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, ASSIGN in 336 msec 2024-11-28T00:31:53,009 INFO [PEWorker-5 {}] 
procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-28T00:31:53,009 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753913009"}]},"ts":"1732753913009"} 2024-11-28T00:31:53,011 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=ENABLED in hbase:meta 2024-11-28T00:31:53,011 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=234, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp execute state=CREATE_TABLE_POST_OPERATION 2024-11-28T00:31:53,011 DEBUG [PEWorker-5 {}] access.PermissionStorage(177): Writing permission with rowKey testtb-testExportFileSystemStateWithSkipTmp jenkins: RWXCA 2024-11-28T00:31:53,014 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-28T00:31:53,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:31:53,016 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:31:53,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:31:53,017 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:31:53,019 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:53,019 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:53,019 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:53,019 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:53,019 DEBUG 
[zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:53,020 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:53,020 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:53,020 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF\x0AQ\x0A\x07jenkins\x12F\x08\x03"B\x0A6\x0A\x07default\x12+testtb-testExportFileSystemStateWithSkipTmp \x00 \x01 \x02 \x03 \x04 2024-11-28T00:31:53,020 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=234, state=SUCCESS, hasLock=false; CreateTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 379 msec 2024-11-28T00:31:53,270 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=234 2024-11-28T00:31:53,271 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:31:53,271 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:31:53,273 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,273 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 
2024-11-28T00:31:53,273 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:31:53,275 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:31:53,279 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:31:53,283 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:31:53,285 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-28T00:31:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753913285 (current time:1732753913285). 2024-11-28T00:31:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:31:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-28T00:31:53,285 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:31:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32113a8c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:31:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:31:53,287 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:31:53,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:31:53,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:31:53,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@306a0a04, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:31:53,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:31:53,287 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,288 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38680, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:31:53,289 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@28eb741a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,289 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:31:53,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:31:53,290 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:31:53,291 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47520, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:31:53,292 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 
2024-11-28T00:31:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:31:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,292 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,292 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:31:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@22c1091c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:31:53,293 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:31:53,293 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:31:53,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:31:53,293 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:31:53,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@35daeac0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to 
use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:31:53,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:31:53,294 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,295 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38706, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:31:53,295 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b524a6e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,296 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:31:53,296 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:31:53,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:31:53,298 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47534, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:31:53,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:31:53,299 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:31:53,300 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36414, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:31:53,301 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673. 
2024-11-28T00:31:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:31:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,301 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:31:53,301 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-28T00:31:53,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 
2024-11-28T00:31:53,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-28T00:31:53,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-11-28T00:31:53,303 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-11-28T00:31:53,304 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:31:53,304 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:31:53,306 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:31:53,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742416_1592 (size=203) 2024-11-28T00:31:53,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742416_1592 (size=203) 2024-11-28T00:31:53,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742416_1592 (size=203) 2024-11-28T00:31:53,312 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:31:53,313 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae9a96d161250eea1514d4fe40999a5a}, {pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c}] 2024-11-28T00:31:53,313 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:53,313 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for 
pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:53,411 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-11-28T00:31:53,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=241 2024-11-28T00:31:53,465 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=240 2024-11-28T00:31:53,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:31:53,465 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.HRegion(2603): Flush status journal for 8ba3ca265e3afad3d2e97b99b7b3f70c: 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.HRegion(2603): Flush status journal for ae9a96d161250eea1514d4fe40999a5a: 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. for emptySnaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.' region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.' 
region-info for snapshot=emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:31:53,466 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] snapshot.SnapshotManifest(256): Adding snapshot references for [] hfiles 2024-11-28T00:31:53,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742417_1593 (size=82) 2024-11-28T00:31:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742417_1593 (size=82) 2024-11-28T00:31:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742418_1594 (size=82) 2024-11-28T00:31:53,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742418_1594 (size=82) 2024-11-28T00:31:53,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:31:53,472 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=241}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=241 2024-11-28T00:31:53,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742418_1594 (size=82) 2024-11-28T00:31:53,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742417_1593 (size=82) 2024-11-28T00:31:53,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 
2024-11-28T00:31:53,473 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-2 {event_type=RS_SNAPSHOT_REGIONS, pid=240}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=240 2024-11-28T00:31:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=241 2024-11-28T00:31:53,473 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:53,473 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=240 2024-11-28T00:31:53,473 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot emptySnaptb0-testExportFileSystemStateWithSkipTmp on region ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:53,473 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=241, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:53,473 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=240, ppid=239, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:53,475 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=241, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c in 161 msec 2024-11-28T00:31:53,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=240, resume processing ppid=239 2024-11-28T00:31:53,476 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=240, ppid=239, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ae9a96d161250eea1514d4fe40999a5a in 161 msec 2024-11-28T00:31:53,476 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:31:53,477 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:31:53,478 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:31:53,478 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:31:53,478 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:31:53,478 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(328): No files under family: cf 2024-11-28T00:31:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742419_1595 (size=74) 2024-11-28T00:31:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742419_1595 (size=74) 2024-11-28T00:31:53,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742419_1595 (size=74) 2024-11-28T00:31:53,490 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:31:53,490 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,491 DEBUG [PEWorker-4 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742420_1596 (size=697) 2024-11-28T00:31:53,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742420_1596 (size=697) 2024-11-28T00:31:53,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742420_1596 (size=697) 2024-11-28T00:31:53,507 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:31:53,511 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:31:53,511 DEBUG [PEWorker-4 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/emptySnaptb0-testExportFileSystemStateWithSkipTmp to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,512 INFO [PEWorker-4 {}] procedure.SnapshotProcedure(134): pid=239, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:31:53,513 DEBUG [PEWorker-4 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 239 2024-11-28T00:31:53,514 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=239, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=239, snapshot={ ss=emptySnaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 211 msec 2024-11-28T00:31:53,570 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000003/launch_container.sh] 2024-11-28T00:31:53,570 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000003/container_tokens] 2024-11-28T00:31:53,570 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_0/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000003/sysfs] 2024-11-28T00:31:53,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=239 2024-11-28T00:31:53,622 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:31:53,626 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43335 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. with WAL disabled. Data may be lost in the event of a crash. 2024-11-28T00:31:53,628 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36427 {}] regionserver.HRegion(8528): writing data to region testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. with WAL disabled. Data may be lost in the event of a crash. 
2024-11-28T00:31:53,629 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:31:53,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 2 regions for table testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,631 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:53,632 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [60,000] milli-secs(wait.for.ratio=[1]) 2024-11-28T00:31:53,633 DEBUG [RPCClient-NioEventLoopGroup-6-3 {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:31:53,637 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:31:53,641 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=testtb-testExportFileSystemStateWithSkipTmp,, stopping at row=testtb-testExportFileSystemStateWithSkipTmp ,, for max=2147483647 with caching=100 2024-11-28T00:31:53,643 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1763): Client=jenkins//172.17.0.2 snapshot request for:{ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-28T00:31:53,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(316): Creation time not specified, setting to:1732753913643 (current time:1732753913643). 
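The snapshot request logged above ({ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }) is the kind of call a client makes through the Admin API; for an enabled table the default snapshot type is FLUSH, which matches the log. A minimal sketch under that assumption follows; it is illustrative only, not the test code behind these lines.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class SnapshotRequestSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Snapshot and table names taken from the log entry above.
          admin.snapshot("snaptb0-testExportFileSystemStateWithSkipTmp",
              TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp"));
        }
      }
    }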
2024-11-28T00:31:53,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(330): Snapshot current TTL value: 0 resetting it to default value: 0 2024-11-28T00:31:53,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(339): Snapshot snaptb0-testExportFileSystemStateWithSkipTmp VERSION not specified, setting to 2 2024-11-28T00:31:53,643 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotDescriptionUtils(346): Set jenkins as owner of Snapshot 2024-11-28T00:31:53,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c0bf02b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:31:53,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:31:53,645 DEBUG [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:31:53,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:31:53,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:31:53,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@784c918, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:31:53,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:31:53,645 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,646 INFO [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38732, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:31:53,646 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33194de6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,647 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:31:53,647 DEBUG 
[MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:31:53,647 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:31:53,648 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47548, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:31:53,649 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:31:53,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.isSecurityAvailable(SnapshotDescriptionUtils.java:481) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:353) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:31:53,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,649 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,649 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:31:53,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a4bb2c9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ClusterIdFetcher(90): Going to request 72c8c7f04009,39673,-1 for getting cluster id 2024-11-28T00:31:53,650 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-28T00:31:53,650 DEBUG [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0971976f-020a-48db-8700-e23062cd273e' 2024-11-28T00:31:53,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-28T00:31:53,650 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0971976f-020a-48db-8700-e23062cd273e" 2024-11-28T00:31:53,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@778a9791, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [72c8c7f04009,39673,-1] 2024-11-28T00:31:53,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-28T00:31:53,651 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,651 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38748, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-28T00:31:53,652 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a0eadb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-28T00:31:53,652 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-28T00:31:53,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=72c8c7f04009,36427,1732753506332, seqNum=-1] 2024-11-28T00:31:53,653 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:31:53,653 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47562, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 
2024-11-28T00:31:53,654 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'hbase:acl', row='testtb-testExportFileSystemStateWithSkipTmp', locateType=CURRENT is [region=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., hostname=72c8c7f04009,41621,1732753506207, seqNum=2] 2024-11-28T00:31:53,655 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-28T00:31:53,655 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-4-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36422, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-28T00:31:53,656 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(233): Connection has been closed by RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673. 2024-11-28T00:31:53,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.client.ConnectionOverAsyncConnection.close(ConnectionOverAsyncConnection.java:134) at org.apache.hadoop.hbase.security.access.PermissionStorage.getPermissions(PermissionStorage.java:522) at org.apache.hadoop.hbase.security.access.PermissionStorage.getTablePermissions(PermissionStorage.java:485) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:490) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils$1.run(SnapshotDescriptionUtils.java:487) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.security.SecurityUtil.doAsUser(SecurityUtil.java:555) at org.apache.hadoop.security.SecurityUtil.doAsLoginUser(SecurityUtil.java:535) at jdk.internal.reflect.GeneratedMethodAccessor227.invoke(Unknown Source) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.apache.hadoop.hbase.util.Methods.call(Methods.java:39) at org.apache.hadoop.hbase.security.User.runAsLoginUser(User.java:174) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.writeAclToSnapshotDescription(SnapshotDescriptionUtils.java:487) at org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.validate(SnapshotDescriptionUtils.java:354) at org.apache.hadoop.hbase.master.MasterRpcServices.snapshot(MasterRpcServices.java:1767) at org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$2.callBlockingMethod(MasterProtos.java) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-28T00:31:53,656 DEBUG 
[RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:31:53,656 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-28T00:31:53,656 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] access.PermissionStorage(613): Read acl: entry[testtb-testExportFileSystemStateWithSkipTmp], kv [jenkins: RWXCA] 2024-11-28T00:31:53,657 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(810): No existing snapshot, attempting snapshot... 2024-11-28T00:31:53,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } 2024-11-28T00:31:53,658 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] snapshot.SnapshotManager(1445): register snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-11-28T00:31:53,659 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PREPARE 2024-11-28T00:31:53,659 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-28T00:31:53,659 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_PRE_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_PRE_OPERATION 2024-11-28T00:31:53,661 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_WRITE_SNAPSHOT_INFO, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_WRITE_SNAPSHOT_INFO 2024-11-28T00:31:53,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742421_1597 (size=198) 2024-11-28T00:31:53,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742421_1597 (size=198) 2024-11-28T00:31:53,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742421_1597 (size=198) 2024-11-28T00:31:53,667 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_ONLINE_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp 
table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_ONLINE_REGIONS 2024-11-28T00:31:53,667 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae9a96d161250eea1514d4fe40999a5a}, {pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c}] 2024-11-28T00:31:53,668 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:53,668 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:53,761 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-28T00:31:53,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=36427 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=244 2024-11-28T00:31:53,820 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43335 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.SnapshotRegionCallable, pid=243 2024-11-28T00:31:53,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:53,820 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(50): Starting snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 
2024-11-28T00:31:53,820 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2902): Flushing ae9a96d161250eea1514d4fe40999a5a 1/1 column families, dataSize=199 B heapSize=688 B 2024-11-28T00:31:53,820 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2902): Flushing 8ba3ca265e3afad3d2e97b99b7b3f70c 1/1 column families, dataSize=3.06 KB heapSize=6.86 KB 2024-11-28T00:31:53,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cab8db5d36ac48aab8c48aeb3cf1ddf4_ae9a96d161250eea1514d4fe40999a5a is 71, key is 012005bcb8739d2d60a8a57b6f5c0108/cf:q/1732753913626/Put/seqid=0 2024-11-28T00:31:53,838 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411288010a74bd7dc450b88d66eff41063f9f_8ba3ca265e3afad3d2e97b99b7b3f70c is 71, key is 17e6f5e16b382ab17101f833afdbe73c/cf:q/1732753913628/Put/seqid=0 2024-11-28T00:31:53,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742423_1599 (size=8171) 2024-11-28T00:31:53,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742423_1599 (size=8171) 2024-11-28T00:31:53,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742422_1598 (size=5102) 2024-11-28T00:31:53,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742422_1598 (size=5102) 2024-11-28T00:31:53,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742422_1598 (size=5102) 2024-11-28T00:31:53,844 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742423_1599 (size=8171) 2024-11-28T00:31:53,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:31:53,844 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:31:53,848 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/c4ca4238a0b923820dcc509a6f75849b202411288010a74bd7dc450b88d66eff41063f9f_8ba3ca265e3afad3d2e97b99b7b3f70c to 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411288010a74bd7dc450b88d66eff41063f9f_8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:53,848 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HMobStore(268): FLUSH Renaming flushed file from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/.tmp/d41d8cd98f00b204e9800998ecf8427e20241128cab8db5d36ac48aab8c48aeb3cf1ddf4_ae9a96d161250eea1514d4fe40999a5a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241128cab8db5d36ac48aab8c48aeb3cf1ddf4_ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:53,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/.tmp/cf/2cab0e7f2e5a4b2295ca2bb97a572757, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=ae9a96d161250eea1514d4fe40999a5a] 2024-11-28T00:31:53,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(263): Flush store file: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/.tmp/cf/3eda430da81d407f9b897a14eef751d4, store: [table=testtb-testExportFileSystemStateWithSkipTmp family=cf region=8ba3ca265e3afad3d2e97b99b7b3f70c] 2024-11-28T00:31:53,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/.tmp/cf/3eda430da81d407f9b897a14eef751d4 is 220, key is 1e63712e52b73ad64c467cd8025589747/cf:q/1732753913628/Put/seqid=0 2024-11-28T00:31:53,849 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/.tmp/cf/2cab0e7f2e5a4b2295ca2bb97a572757 is 220, key is 05125a5f72bae6359954834ad7678820a/cf:q/1732753913626/Put/seqid=0 2024-11-28T00:31:53,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742424_1600 (size=5962) 2024-11-28T00:31:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742425_1601 (size=15525) 2024-11-28T00:31:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742424_1600 (size=5962) 2024-11-28T00:31:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:43159 is added to blk_1073742425_1601 (size=15525) 2024-11-28T00:31:53,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742424_1600 (size=5962) 2024-11-28T00:31:53,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742425_1601 (size=15525) 2024-11-28T00:31:53,855 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=199, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/.tmp/cf/2cab0e7f2e5a4b2295ca2bb97a572757 2024-11-28T00:31:53,855 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] mob.DefaultMobStoreFlusher(147): Mob store is flushed, sequenceid=6, memsize=3.1 K, hasBloomFilter=true, into tmp file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/.tmp/cf/3eda430da81d407f9b897a14eef751d4 2024-11-28T00:31:53,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/.tmp/cf/2cab0e7f2e5a4b2295ca2bb97a572757 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/cf/2cab0e7f2e5a4b2295ca2bb97a572757 2024-11-28T00:31:53,859 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/.tmp/cf/3eda430da81d407f9b897a14eef751d4 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/cf/3eda430da81d407f9b897a14eef751d4 2024-11-28T00:31:53,863 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/cf/2cab0e7f2e5a4b2295ca2bb97a572757, entries=3, sequenceid=6, filesize=5.8 K 2024-11-28T00:31:53,863 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/cf/3eda430da81d407f9b897a14eef751d4, entries=47, sequenceid=6, filesize=15.2 K 2024-11-28T00:31:53,863 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] 
regionserver.HRegion(3140): Finished flush of dataSize ~199 B/199, heapSize ~672 B/672, currentSize=0 B/0 for ae9a96d161250eea1514d4fe40999a5a in 43ms, sequenceid=6, compaction requested=false 2024-11-28T00:31:53,863 INFO [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(3140): Finished flush of dataSize ~3.06 KB/3137, heapSize ~6.84 KB/7008, currentSize=0 B/0 for 8ba3ca265e3afad3d2e97b99b7b3f70c in 43ms, sequenceid=6, compaction requested=false 2024-11-28T00:31:53,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-28T00:31:53,863 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'testtb-testExportFileSystemStateWithSkipTmp' 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.HRegion(2603): Flush status journal for ae9a96d161250eea1514d4fe40999a5a: 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.HRegion(2603): Flush status journal for 8ba3ca265e3afad3d2e97b99b7b3f70c: 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(75): Snapshotting region testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. for snaptb0-testExportFileSystemStateWithSkipTmp completed. 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.' region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(241): Storing 'testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.' 
region-info for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(246): Creating references for hfiles 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/cf/3eda430da81d407f9b897a14eef751d4] hfiles 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(256): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/cf/2cab0e7f2e5a4b2295ca2bb97a572757] hfiles 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/cf/3eda430da81d407f9b897a14eef751d4 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,864 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] snapshot.SnapshotManifest(265): Adding reference for file (1/1): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/cf/2cab0e7f2e5a4b2295ca2bb97a572757 for snapshot=snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742426_1602 (size=121) 2024-11-28T00:31:53,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742427_1603 (size=121) 2024-11-28T00:31:53,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742427_1603 (size=121) 2024-11-28T00:31:53,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742427_1603 (size=121) 2024-11-28T00:31:53,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742426_1602 (size=121) 2024-11-28T00:31:53,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 
2024-11-28T00:31:53,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-1 {event_type=RS_SNAPSHOT_REGIONS, pid=244}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=244 2024-11-28T00:31:53,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742426_1602 (size=121) 2024-11-28T00:31:53,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.SnapshotRegionCallable(78): Closing snapshot operation on testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:31:53,871 DEBUG [RS_SNAPSHOT_OPERATIONS-regionserver/72c8c7f04009:0-0 {event_type=RS_SNAPSHOT_REGIONS, pid=243}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=243 2024-11-28T00:31:53,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=244 2024-11-28T00:31:53,871 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster(4169): Remote procedure done, pid=243 2024-11-28T00:31:53,871 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:53,871 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure.SnapshotRegionProcedure(132): finish snapshot snaptb0-testExportFileSystemStateWithSkipTmp on region ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:53,872 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=243, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:53,872 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=244, ppid=242, state=RUNNABLE, hasLock=false; SnapshotRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:53,873 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=243, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure ae9a96d161250eea1514d4fe40999a5a in 205 msec 2024-11-28T00:31:53,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=244, resume processing ppid=242 2024-11-28T00:31:53,874 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=244, ppid=242, state=SUCCESS, hasLock=false; SnapshotRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c in 206 msec 2024-11-28T00:31:53,874 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_SPLIT_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_SPLIT_REGIONS 2024-11-28T00:31:53,875 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_SNAPSHOT_MOB_REGION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_SNAPSHOT_MOB_REGION 2024-11-28T00:31:53,876 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(300): Storing region-info for snapshot. 
2024-11-28T00:31:53,876 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(305): Creating references for hfiles 2024-11-28T00:31:53,876 DEBUG [MobRegionSnapshotPool-pool-0 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-28T00:31:53,877 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(366): Adding snapshot references for [hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411288010a74bd7dc450b88d66eff41063f9f_8ba3ca265e3afad3d2e97b99b7b3f70c, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241128cab8db5d36ac48aab8c48aeb3cf1ddf4_ae9a96d161250eea1514d4fe40999a5a] hfiles 2024-11-28T00:31:53,877 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (1/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411288010a74bd7dc450b88d66eff41063f9f_8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:31:53,877 DEBUG [MobRegionSnapshotPool-pool-0 {}] snapshot.SnapshotManifest(374): Adding reference for hfile (2/2): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241128cab8db5d36ac48aab8c48aeb3cf1ddf4_ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:31:53,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742428_1604 (size=305) 2024-11-28T00:31:53,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742428_1604 (size=305) 2024-11-28T00:31:53,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742428_1604 (size=305) 2024-11-28T00:31:53,883 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_CONSOLIDATE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_CONSOLIDATE_SNAPSHOT 2024-11-28T00:31:53,883 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifest(495): Convert to Single Snapshot Manifest for snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,883 DEBUG [PEWorker-2 {}] snapshot.SnapshotManifestV1(130): No regions under directory:hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742429_1605 (size=1007) 2024-11-28T00:31:53,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added 
to blk_1073742429_1605 (size=1007) 2024-11-28T00:31:53,893 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742429_1605 (size=1007) 2024-11-28T00:31:53,901 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_VERIFIER_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_VERIFIER_SNAPSHOT 2024-11-28T00:31:53,913 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_COMPLETE_SNAPSHOT, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_COMPLETE_SNAPSHOT 2024-11-28T00:31:53,914 DEBUG [PEWorker-2 {}] snapshot.SnapshotDescriptionUtils(414): Sentinel is done, just moving the snapshot from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/.tmp/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:53,915 INFO [PEWorker-2 {}] procedure.SnapshotProcedure(134): pid=242, state=RUNNABLE:SNAPSHOT_POST_OPERATION, hasLock=true; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } execute state=SNAPSHOT_POST_OPERATION 2024-11-28T00:31:53,915 DEBUG [PEWorker-2 {}] snapshot.SnapshotManager(1451): unregister snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 }, snapshot procedure id = 242 2024-11-28T00:31:53,916 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=242, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.SnapshotProcedure, id=242, snapshot={ ss=snaptb0-testExportFileSystemStateWithSkipTmp table=testtb-testExportFileSystemStateWithSkipTmp type=FLUSH ttl=0 } in 258 msec 2024-11-28T00:31:53,971 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=242 2024-11-28T00:31:53,972 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: SNAPSHOT, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:31:53,972 INFO [Time-limited test {}] snapshot.TestExportSnapshot(515): HDFS export destination path: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972 2024-11-28T00:31:53,972 INFO [Time-limited test {}] snapshot.TestExportSnapshot(542): tgtFsUri=hdfs://localhost:43901, tgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972, rawTgtDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972, srcFsUri=hdfs://localhost:43901, srcDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:31:54,006 DEBUG [Time-limited test {}] 
snapshot.ExportSnapshot(1094): inputFs=hdfs://localhost:43901, inputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a 2024-11-28T00:31:54,006 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(1095): outputFs=DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]], outputRoot=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972, skipTmp=true, initialOutputSnapshotDir=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:54,007 INFO [Time-limited test {}] snapshot.ExportSnapshot(1104): Verify the source snapshot's expiration status and integrity. 2024-11-28T00:31:54,012 INFO [Time-limited test {}] snapshot.ExportSnapshot(1162): Copy Snapshot Manifest from hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:54,025 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742430_1606 (size=1007) 2024-11-28T00:31:54,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742430_1606 (size=1007) 2024-11-28T00:31:54,026 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742430_1606 (size=1007) 2024-11-28T00:31:54,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742431_1607 (size=198) 2024-11-28T00:31:54,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742431_1607 (size=198) 2024-11-28T00:31:54,034 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742431_1607 (size=198) 2024-11-28T00:31:54,036 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.HConstants, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-common/target/hbase-common-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:54,036 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-protocol-shaded/target/hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:54,036 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.client.Put, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-client/target/hbase-client-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:54,957 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 91195ca5c70c54af0f84245ffc432b72, had cached 0 bytes from a total of 5791 2024-11-28T00:31:55,277 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.ipc.RpcServer, using jar 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-3518104593376348715.jar 2024-11-28T00:31:55,278 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.CompatibilityFactory, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:55,278 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.JobUtil, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-hadoop-compat/target/hbase-hadoop-compat-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:55,360 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.mapreduce.TableMapper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop-4991311933494335013.jar 2024-11-28T00:31:55,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.impl.FastLongHistogram, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics/target/hbase-metrics-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:55,361 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.metrics.Snapshot, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-metrics-api/target/hbase-metrics-api-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:55,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.replication.ReplicationUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-replication/target/hbase-replication-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:55,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.http.HttpServer, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-http/target/hbase-http-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:55,362 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.procedure2.Procedure, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-procedure/target/hbase-procedure-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:55,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.zookeeper.ZKWatcher, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-zookeeper/target/hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar 2024-11-28T00:31:55,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.common.collect.Lists, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-miscellaneous/4.1.9/hbase-shaded-miscellaneous-4.1.9.jar 2024-11-28T00:31:55,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.com.google.gson.GsonBuilder, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-gson/4.1.9/hbase-shaded-gson-4.1.9.jar 2024-11-28T00:31:55,363 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class 
org.apache.hbase.thirdparty.com.google.protobuf.UnsafeByteOperations, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-protobuf/4.1.9/hbase-shaded-protobuf-4.1.9.jar 2024-11-28T00:31:55,364 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hbase.thirdparty.io.netty.channel.Channel, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-shaded-netty/4.1.9/hbase-shaded-netty-4.1.9.jar 2024-11-28T00:31:55,364 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.hbase.unsafe.HBasePlatformDependent, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hbase/thirdparty/hbase-unsafe/4.1.9/hbase-unsafe-4.1.9.jar 2024-11-28T00:31:55,364 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.zookeeper.ZooKeeper, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/zookeeper/zookeeper/3.8.4/zookeeper-3.8.4.jar 2024-11-28T00:31:55,365 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class com.codahale.metrics.MetricRegistry, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/dropwizard/metrics/metrics-core/3.2.6/metrics-core-3.2.6.jar 2024-11-28T00:31:55,365 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.commons.lang3.ArrayUtils, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/commons/commons-lang3/3.9/commons-lang3-3.9.jar 2024-11-28T00:31:55,365 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.api.trace.Span, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-api/1.15.0/opentelemetry-api-1.15.0.jar 2024-11-28T00:31:55,366 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.semconv.trace.attributes.SemanticAttributes, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-semconv/1.15.0-alpha/opentelemetry-semconv-1.15.0-alpha.jar 2024-11-28T00:31:55,366 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class io.opentelemetry.context.Context, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/io/opentelemetry/opentelemetry-context/1.15.0/opentelemetry-context-1.15.0.jar 2024-11-28T00:31:55,366 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:31:55,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:31:55,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.input.TextInputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 
2024-11-28T00:31:55,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.LongWritable, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:31:55,367 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.io.Text, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-common/3.4.1/hadoop-common-3.4.1.jar 2024-11-28T00:31:55,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.output.TextOutputFormat, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:31:55,368 DEBUG [Time-limited test {}] mapreduce.TableMapReduceUtil(972): For class org.apache.hadoop.mapreduce.lib.partition.HashPartitioner, using jar /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-mapreduce-client-core/3.4.1/hadoop-mapreduce-client-core-3.4.1.jar 2024-11-28T00:31:55,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742432_1608 (size=131440) 2024-11-28T00:31:55,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742432_1608 (size=131440) 2024-11-28T00:31:55,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742432_1608 (size=131440) 2024-11-28T00:31:55,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742433_1609 (size=4188619) 2024-11-28T00:31:55,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742433_1609 (size=4188619) 2024-11-28T00:31:55,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742433_1609 (size=4188619) 2024-11-28T00:31:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:31:55,541 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp Metrics about Tables on a single HBase RegionServer 2024-11-28T00:31:55,542 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportWithChecksum 2024-11-28T00:31:55,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742434_1610 (size=1323991) 2024-11-28T00:31:55,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742434_1610 (size=1323991) 2024-11-28T00:31:55,543 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742434_1610 (size=1323991) 
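Editorial illustration (not part of the captured log): the mapreduce.TableMapReduceUtil(972) entries above show the test resolving, for each class the MapReduce job depends on, the jar that will be shipped with the job. A minimal sketch of the call that triggers this resolution is below; the driver class and job name are hypothetical, the rest is the standard HBase/Hadoop API.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.mapreduce.Job;

public class DependencyJarsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "dependency-jars-sketch");
    // Resolves the jar containing each class the job needs (key/value types,
    // input/output formats, partitioner, HBase client classes, and so on) and
    // adds it to the job's distributed cache; the "For class X, using jar Y"
    // DEBUG lines above are logged during this resolution step.
    TableMapReduceUtil.addDependencyJars(job);
  }
}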
2024-11-28T00:31:55,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742435_1611 (size=903927) 2024-11-28T00:31:55,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742435_1611 (size=903927) 2024-11-28T00:31:55,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742435_1611 (size=903927) 2024-11-28T00:31:56,085 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742436_1612 (size=8360083) 2024-11-28T00:31:56,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742436_1612 (size=8360083) 2024-11-28T00:31:56,086 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742436_1612 (size=8360083) 2024-11-28T00:31:56,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742437_1613 (size=440957) 2024-11-28T00:31:56,119 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742437_1613 (size=440957) 2024-11-28T00:31:56,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742437_1613 (size=440957) 2024-11-28T00:31:56,151 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742438_1614 (size=1877034) 2024-11-28T00:31:56,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742438_1614 (size=1877034) 2024-11-28T00:31:56,152 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742438_1614 (size=1877034) 2024-11-28T00:31:56,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742439_1615 (size=77835) 2024-11-28T00:31:56,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742439_1615 (size=77835) 2024-11-28T00:31:56,174 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742439_1615 (size=77835) 2024-11-28T00:31:56,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742440_1616 (size=30949) 2024-11-28T00:31:56,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742440_1616 (size=30949) 2024-11-28T00:31:56,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742440_1616 (size=30949) 2024-11-28T00:31:56,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742441_1617 (size=1597213) 2024-11-28T00:31:56,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742441_1617 
(size=1597213) 2024-11-28T00:31:56,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742441_1617 (size=1597213) 2024-11-28T00:31:56,291 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742442_1618 (size=4695811) 2024-11-28T00:31:56,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742442_1618 (size=4695811) 2024-11-28T00:31:56,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742442_1618 (size=4695811) 2024-11-28T00:31:56,371 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742443_1619 (size=6424743) 2024-11-28T00:31:56,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742443_1619 (size=6424743) 2024-11-28T00:31:56,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742443_1619 (size=6424743) 2024-11-28T00:31:56,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742444_1620 (size=232957) 2024-11-28T00:31:56,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742444_1620 (size=232957) 2024-11-28T00:31:56,485 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742444_1620 (size=232957) 2024-11-28T00:31:56,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742445_1621 (size=127628) 2024-11-28T00:31:56,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742445_1621 (size=127628) 2024-11-28T00:31:56,511 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742445_1621 (size=127628) 2024-11-28T00:31:56,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742446_1622 (size=20406) 2024-11-28T00:31:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742446_1622 (size=20406) 2024-11-28T00:31:56,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742446_1622 (size=20406) 2024-11-28T00:31:56,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742447_1623 (size=5175431) 2024-11-28T00:31:56,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742447_1623 (size=5175431) 2024-11-28T00:31:56,593 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742447_1623 (size=5175431) 2024-11-28T00:31:56,635 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to 
blk_1073742448_1624 (size=217634) 2024-11-28T00:31:56,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742448_1624 (size=217634) 2024-11-28T00:31:56,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742448_1624 (size=217634) 2024-11-28T00:31:56,731 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742449_1625 (size=1832290) 2024-11-28T00:31:56,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742449_1625 (size=1832290) 2024-11-28T00:31:56,733 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742449_1625 (size=1832290) 2024-11-28T00:31:56,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742450_1626 (size=322274) 2024-11-28T00:31:56,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742450_1626 (size=322274) 2024-11-28T00:31:56,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742450_1626 (size=322274) 2024-11-28T00:31:56,856 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0010_000001 (auth:SIMPLE) from 127.0.0.1:57136 2024-11-28T00:31:56,881 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000001/launch_container.sh] 2024-11-28T00:31:56,881 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000001/container_tokens] 2024-11-28T00:31:56,881 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_2/usercache/jenkins/appcache/application_1732753513471_0010/container_1732753513471_0010_01_000001/sysfs] 2024-11-28T00:31:56,912 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742451_1627 (size=503880) 2024-11-28T00:31:56,915 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742451_1627 (size=503880) 2024-11-28T00:31:56,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742451_1627 (size=503880) 2024-11-28T00:31:56,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:39705 is added to blk_1073742452_1628 (size=29229) 2024-11-28T00:31:56,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742452_1628 (size=29229) 2024-11-28T00:31:56,985 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742452_1628 (size=29229) 2024-11-28T00:31:57,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742453_1629 (size=24096) 2024-11-28T00:31:57,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742453_1629 (size=24096) 2024-11-28T00:31:57,060 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742453_1629 (size=24096) 2024-11-28T00:31:57,104 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742454_1630 (size=111872) 2024-11-28T00:31:57,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742454_1630 (size=111872) 2024-11-28T00:31:57,105 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742454_1630 (size=111872) 2024-11-28T00:31:57,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742455_1631 (size=45609) 2024-11-28T00:31:57,146 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742455_1631 (size=45609) 2024-11-28T00:31:57,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742455_1631 (size=45609) 2024-11-28T00:31:57,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742456_1632 (size=136454) 2024-11-28T00:31:57,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742456_1632 (size=136454) 2024-11-28T00:31:57,172 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742456_1632 (size=136454) 2024-11-28T00:31:57,174 WARN [Time-limited test {}] mapreduce.JobResourceUploader(481): No job jar file set. User classes may not be found. See Job or Job#setJar(String). 
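Editorial illustration (not part of the captured log): the JobResourceUploader(481) warning above ("No job jar file set. User classes may not be found.") appears because the test submits the job without a job jar. Outside of a mini-cluster test, the usual way to avoid it is to tell Hadoop which jar to ship; the class name and path below are hypothetical.

import org.apache.hadoop.mapreduce.Job;

public class JobJarSketch {
  static void configureJar(Job job) {
    // Hadoop locates the jar that contains this class and ships it with the
    // job, which avoids the "No job jar file set" warning.
    job.setJarByClass(JobJarSketch.class);
    // Alternatively, an explicit jar path can be set:
    // job.setJar("/path/to/application.jar");  // hypothetical path
  }
}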
2024-11-28T00:31:57,177 INFO [Time-limited test {}] snapshot.ExportSnapshot(663): Loading Snapshot 'snaptb0-testExportFileSystemStateWithSkipTmp' hfile list 2024-11-28T00:31:57,179 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=0 size=15.2 K 2024-11-28T00:31:57,179 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=1 size=8.0 K 2024-11-28T00:31:57,179 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=2 size=5.8 K 2024-11-28T00:31:57,179 DEBUG [Time-limited test {}] snapshot.ExportSnapshot(763): export split=3 size=5.0 K 2024-11-28T00:31:57,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742457_1633 (size=1079) 2024-11-28T00:31:57,193 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742457_1633 (size=1079) 2024-11-28T00:31:57,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742457_1633 (size=1079) 2024-11-28T00:31:57,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742458_1634 (size=35) 2024-11-28T00:31:57,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742458_1634 (size=35) 2024-11-28T00:31:57,215 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742458_1634 (size=35) 2024-11-28T00:31:57,243 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742459_1635 (size=304166) 2024-11-28T00:31:57,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742459_1635 (size=304166) 2024-11-28T00:31:57,244 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742459_1635 (size=304166) 2024-11-28T00:31:57,261 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(880): maximum-am-resource-percent is insufficient to start a single application in queue, it is likely set too low. skipping enforcement to allow at least one application to start 2024-11-28T00:31:57,261 WARN [SchedulerEventDispatcher:Event Processor {}] capacity.AbstractLeafQueue(913): maximum-am-resource-percent is insufficient to start a single application in queue for user, it is likely set too low. 
skipping enforcement to allow at least one application to start 2024-11-28T00:31:57,536 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:37166 2024-11-28T00:31:57,864 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:32:03,079 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:41342 2024-11-28T00:32:03,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742460_1636 (size=349888) 2024-11-28T00:32:03,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742460_1636 (size=349888) 2024-11-28T00:32:03,313 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742460_1636 (size=349888) 2024-11-28T00:32:03,957 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:32:05,319 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:42662 2024-11-28T00:32:05,319 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:46540 2024-11-28T00:32:06,164 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:42672 2024-11-28T00:32:06,165 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:46544 2024-11-28T00:32:08,815 WARN [NM Event dispatcher {}] containermanager.ContainerManagerImpl(1784): couldn't find container container_1732753513471_0011_01_000006 while processing FINISH_CONTAINERS event 2024-11-28T00:32:09,610 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region 8ba3ca265e3afad3d2e97b99b7b3f70c changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:32:09,610 DEBUG [master/72c8c7f04009:0.Chore.1 {}] balancer.RegionHDFSBlockLocationFinder(159): Locality for region ae9a96d161250eea1514d4fe40999a5a changed from -1.0 to 0.0, refreshing cache 2024-11-28T00:32:10,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742461_1637 (size=15525) 2024-11-28T00:32:10,949 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742461_1637 (size=15525) 2024-11-28T00:32:10,952 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742461_1637 (size=15525) 2024-11-28T00:32:11,262 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: 
[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000002/launch_container.sh] 2024-11-28T00:32:11,263 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000002/container_tokens] 2024-11-28T00:32:11,263 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000002/sysfs] 2024-11-28T00:32:12,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742463_1639 (size=8171) 2024-11-28T00:32:12,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742463_1639 (size=8171) 2024-11-28T00:32:12,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742463_1639 (size=8171) 2024-11-28T00:32:12,516 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000003/launch_container.sh] 2024-11-28T00:32:12,517 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000003/container_tokens] 2024-11-28T00:32:12,517 WARN [ContainersLauncher #2 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_1/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000003/sysfs] 2024-11-28T00:32:12,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742464_1640 (size=5102) 2024-11-28T00:32:12,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742464_1640 (size=5102) 2024-11-28T00:32:12,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742464_1640 (size=5102) 2024-11-28T00:32:12,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39705 is added to blk_1073742465_1641 (size=5962) 2024-11-28T00:32:12,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742465_1641 (size=5962) 2024-11-28T00:32:12,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742465_1641 (size=5962) 2024-11-28T00:32:12,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742462_1638 (size=31803) 2024-11-28T00:32:12,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742462_1638 (size=31803) 2024-11-28T00:32:12,723 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742462_1638 (size=31803) 2024-11-28T00:32:12,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742466_1642 (size=477) 2024-11-28T00:32:12,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742466_1642 (size=477) 2024-11-28T00:32:12,754 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742466_1642 (size=477) 2024-11-28T00:32:12,766 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000005/launch_container.sh] 2024-11-28T00:32:12,766 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000005/container_tokens] 2024-11-28T00:32:12,766 WARN [ContainersLauncher #6 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-0_3/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000005/sysfs] 2024-11-28T00:32:12,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742467_1643 (size=31803) 2024-11-28T00:32:12,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742467_1643 (size=31803) 2024-11-28T00:32:12,789 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742467_1643 (size=31803) 2024-11-28T00:32:12,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742468_1644 (size=349888) 2024-11-28T00:32:12,806 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* 
addStoredBlock: 127.0.0.1:39705 is added to blk_1073742468_1644 (size=349888) 2024-11-28T00:32:12,807 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742468_1644 (size=349888) 2024-11-28T00:32:12,809 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000004/launch_container.sh] 2024-11-28T00:32:12,809 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000004/container_tokens] 2024-11-28T00:32:12,809 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000004/sysfs] 2024-11-28T00:32:12,821 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:37432 2024-11-28T00:32:12,828 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:59898 2024-11-28T00:32:12,832 INFO [Socket Reader #1 for port 0 {}] ipc.Server$Connection(2290): Auth successful for appattempt_1732753513471_0011_000001 (auth:SIMPLE) from 127.0.0.1:37448 2024-11-28T00:32:14,605 INFO [Time-limited test {}] snapshot.ExportSnapshot(1219): Finalize the Snapshot Export 2024-11-28T00:32:14,605 INFO [Time-limited test {}] snapshot.ExportSnapshot(1230): Verify the exported snapshot's expiration status and integrity. 
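Editorial illustration (not part of the captured log): the snapshot.ExportSnapshot entries in this part of the log (loading the hfile list, computing export splits, finalizing and verifying the export) come from running the ExportSnapshot tool against snaptb0-testExportFileSystemStateWithSkipTmp. A sketch of a programmatic invocation is below; the destination URI is hypothetical, and the commented-out property is believed, but not verified here, to be the "skip tmp" setting the test name refers to.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.snapshot.ExportSnapshot;
import org.apache.hadoop.util.ToolRunner;

public class ExportSnapshotSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // conf.setBoolean("snapshot.export.skip.tmp", true);  // assumption: write to the target directly, skipping the .tmp staging dir
    int rc = ToolRunner.run(conf, new ExportSnapshot(), new String[] {
        "-snapshot", "snaptb0-testExportFileSystemStateWithSkipTmp",
        "-copy-to", "hdfs://backup-namenode:8020/hbase-backup"  // hypothetical destination
    });
    System.exit(rc);
  }
}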
2024-11-28T00:32:14,611 INFO [Time-limited test {}] snapshot.ExportSnapshot(1236): Export Completed: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,612 INFO [Time-limited test {}] snapshot.TestExportSnapshot(409): Exported snapshot 2024-11-28T00:32:14,612 INFO [Time-limited test {}] snapshot.TestExportSnapshot(420): Verified filesystem state 2024-11-28T00:32:14,612 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,612 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-28T00:32:14,613 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-28T00:32:14,613 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(495): List files in DFS[DFSClient[clientName=DFSClient_NONMAPREDUCE_-519179657_22, ugi=jenkins (auth:SIMPLE)]] in root hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp at hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,613 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/.snapshotinfo 2024-11-28T00:32:14,613 DEBUG [Time-limited test {}] snapshot.TestExportSnapshot(500): hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/export-test/export-1732753913972/.hbase-snapshot/snaptb0-testExportFileSystemStateWithSkipTmp/data.manifest 2024-11-28T00:32:14,619 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$13(2820): Client=jenkins//172.17.0.2 disable testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,620 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=245, state=RUNNABLE:DISABLE_TABLE_PREPARE, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,622 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-28T00:32:14,622 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753934622"}]},"ts":"1732753934622"} 2024-11-28T00:32:14,624 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLING in hbase:meta 2024-11-28T00:32:14,624 INFO 
[PEWorker-1 {}] procedure.DisableTableProcedure(284): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLING 2024-11-28T00:32:14,624 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=246, ppid=245, state=RUNNABLE:CLOSE_TABLE_REGIONS_SCHEDULE, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp}] 2024-11-28T00:32:14,626 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, UNASSIGN}, {pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, UNASSIGN}] 2024-11-28T00:32:14,626 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, UNASSIGN 2024-11-28T00:32:14,626 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, UNASSIGN 2024-11-28T00:32:14,627 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=8ba3ca265e3afad3d2e97b99b7b3f70c, regionState=CLOSING, regionLocation=72c8c7f04009,36427,1732753506332 2024-11-28T00:32:14,627 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=ae9a96d161250eea1514d4fe40999a5a, regionState=CLOSING, regionLocation=72c8c7f04009,43335,1732753506004 2024-11-28T00:32:14,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=247, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, UNASSIGN because future has completed 2024-11-28T00:32:14,629 DEBUG [PEWorker-2 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:32:14,629 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure ae9a96d161250eea1514d4fe40999a5a, server=72c8c7f04009,43335,1732753506004}] 2024-11-28T00:32:14,629 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=248, ppid=246, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, UNASSIGN because future has completed 2024-11-28T00:32:14,630 DEBUG [PEWorker-1 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: false: evictOnSplit: true: evictOnClose: false 2024-11-28T00:32:14,630 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c, 
server=72c8c7f04009,36427,1732753506332}] 2024-11-28T00:32:14,731 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-28T00:32:14,781 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(122): Close ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:32:14,781 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1722): Closing ae9a96d161250eea1514d4fe40999a5a, disabling compactions & flushes 2024-11-28T00:32:14,782 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. after waiting 0 ms 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:32:14,782 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(122): Close 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(136): Unassign region: split region: false: evictCache: false 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1722): Closing 8ba3ca265e3afad3d2e97b99b7b3f70c, disabling compactions & flushes 2024-11-28T00:32:14,782 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1755): Closing region testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1776): Time limited wait for close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1843): Acquired close lock on testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 
after waiting 0 ms 2024-11-28T00:32:14,782 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1853): Updates disabled for region testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 2024-11-28T00:32:14,785 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:32:14,786 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-28T00:32:14,786 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:14,786 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a. 2024-11-28T00:32:14,786 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] regionserver.HRegion(1676): Region close journal for ae9a96d161250eea1514d4fe40999a5a: Waiting for close lock at 1732753934781Running coprocessor pre-close hooks at 1732753934781Disabling compacts and flushes for region at 1732753934781Disabling writes for close at 1732753934782 (+1 ms)Writing region close event to WAL at 1732753934782Running coprocessor post-close hooks at 1732753934786 (+4 ms)Closed at 1732753934786 2024-11-28T00:32:14,786 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:14,786 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1973): Closed testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c. 
2024-11-28T00:32:14,786 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] regionserver.HRegion(1676): Region close journal for 8ba3ca265e3afad3d2e97b99b7b3f70c: Waiting for close lock at 1732753934782Running coprocessor pre-close hooks at 1732753934782Disabling compacts and flushes for region at 1732753934782Disabling writes for close at 1732753934782Writing region close event to WAL at 1732753934783 (+1 ms)Running coprocessor post-close hooks at 1732753934786 (+3 ms)Closed at 1732753934786 2024-11-28T00:32:14,788 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=249}] handler.UnassignRegionHandler(157): Closed ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:32:14,788 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=247 updating hbase:meta row=ae9a96d161250eea1514d4fe40999a5a, regionState=CLOSED 2024-11-28T00:32:14,788 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION, pid=250}] handler.UnassignRegionHandler(157): Closed 8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:32:14,789 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=248 updating hbase:meta row=8ba3ca265e3afad3d2e97b99b7b3f70c, regionState=CLOSED 2024-11-28T00:32:14,790 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=249, ppid=247, state=RUNNABLE, hasLock=false; CloseRegionProcedure ae9a96d161250eea1514d4fe40999a5a, server=72c8c7f04009,43335,1732753506004 because future has completed 2024-11-28T00:32:14,791 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-5-3 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=250, ppid=248, state=RUNNABLE, hasLock=false; CloseRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c, server=72c8c7f04009,36427,1732753506332 because future has completed 2024-11-28T00:32:14,792 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=249, resume processing ppid=247 2024-11-28T00:32:14,793 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=249, ppid=247, state=SUCCESS, hasLock=false; CloseRegionProcedure ae9a96d161250eea1514d4fe40999a5a, server=72c8c7f04009,43335,1732753506004 in 162 msec 2024-11-28T00:32:14,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=250, resume processing ppid=248 2024-11-28T00:32:14,793 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=250, ppid=248, state=SUCCESS, hasLock=false; CloseRegionProcedure 8ba3ca265e3afad3d2e97b99b7b3f70c, server=72c8c7f04009,36427,1732753506332 in 162 msec 2024-11-28T00:32:14,794 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=247, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=ae9a96d161250eea1514d4fe40999a5a, UNASSIGN in 166 msec 2024-11-28T00:32:14,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=248, resume processing ppid=246 2024-11-28T00:32:14,795 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=248, ppid=246, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=testtb-testExportFileSystemStateWithSkipTmp, region=8ba3ca265e3afad3d2e97b99b7b3f70c, UNASSIGN in 167 msec 2024-11-28T00:32:14,797 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=246, resume processing ppid=245 2024-11-28T00:32:14,797 INFO [PEWorker-2 {}] 
procedure2.ProcedureExecutor(1521): Finished pid=246, ppid=245, state=SUCCESS, hasLock=false; CloseTableRegionsProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 171 msec 2024-11-28T00:32:14,798 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732753934798"}]},"ts":"1732753934798"} 2024-11-28T00:32:14,799 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=testtb-testExportFileSystemStateWithSkipTmp, state=DISABLED in hbase:meta 2024-11-28T00:32:14,799 INFO [PEWorker-1 {}] procedure.DisableTableProcedure(296): Set testtb-testExportFileSystemStateWithSkipTmp to state=DISABLED 2024-11-28T00:32:14,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=245, state=SUCCESS, hasLock=false; DisableTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 180 msec 2024-11-28T00:32:14,941 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=245 2024-11-28T00:32:14,941 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DISABLE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:32:14,942 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.HMaster$5(2570): Client=jenkins//172.17.0.2 delete testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,943 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] procedure2.ProcedureExecutor(1139): Stored pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,943 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(103): Waiting for RIT for pid=251, state=RUNNABLE:DELETE_TABLE_PRE_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,944 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] access.PermissionStorage(261): Removing permissions of removed table testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,944 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(115): Deleting regions from filesystem for pid=251, state=RUNNABLE:DELETE_TABLE_CLEAR_FS_LAYOUT, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,947 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41621 {}] access.PermissionStorage(529): No permissions found in hbase:acl for acl entry testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,948 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:32:14,949 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:32:14,950 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(159): Archiving [FileablePath, 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/recovered.edits] 2024-11-28T00:32:14,950 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/cf, FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/recovered.edits] 2024-11-28T00:32:14,953 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/cf/3eda430da81d407f9b897a14eef751d4 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/cf/3eda430da81d407f9b897a14eef751d4 2024-11-28T00:32:14,953 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/cf/2cab0e7f2e5a4b2295ca2bb97a572757 to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/cf/2cab0e7f2e5a4b2295ca2bb97a572757 2024-11-28T00:32:14,956 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c/recovered.edits/9.seqid 2024-11-28T00:32:14,956 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/recovered.edits/9.seqid to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a/recovered.edits/9.seqid 2024-11-28T00:32:14,956 DEBUG [HFileArchiver-27 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:32:14,956 DEBUG [HFileArchiver-26 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testtb-testExportFileSystemStateWithSkipTmp/ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:32:14,956 DEBUG [PEWorker-3 {}] 
procedure.DeleteTableProcedure(313): Archived testtb-testExportFileSystemStateWithSkipTmp regions 2024-11-28T00:32:14,957 DEBUG [PEWorker-3 {}] backup.HFileArchiver(131): ARCHIVING hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-28T00:32:14,957 DEBUG [PEWorker-3 {}] backup.HFileArchiver(159): Archiving [FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf] 2024-11-28T00:32:14,959 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411288010a74bd7dc450b88d66eff41063f9f_8ba3ca265e3afad3d2e97b99b7b3f70c to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/c4ca4238a0b923820dcc509a6f75849b202411288010a74bd7dc450b88d66eff41063f9f_8ba3ca265e3afad3d2e97b99b7b3f70c 2024-11-28T00:32:14,960 DEBUG [PEWorker-3 {}] backup.HFileArchiver(596): Archived from FileablePath, hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241128cab8db5d36ac48aab8c48aeb3cf1ddf4_ae9a96d161250eea1514d4fe40999a5a to hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/archive/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a/cf/d41d8cd98f00b204e9800998ecf8427e20241128cab8db5d36ac48aab8c48aeb3cf1ddf4_ae9a96d161250eea1514d4fe40999a5a 2024-11-28T00:32:14,961 DEBUG [PEWorker-3 {}] backup.HFileArchiver(610): Deleted hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/mobdir/data/default/testtb-testExportFileSystemStateWithSkipTmp/e9516f04c7d2974b21addb190acc6c0a 2024-11-28T00:32:14,962 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(120): Deleting regions from META for pid=251, state=RUNNABLE:DELETE_TABLE_REMOVE_FROM_META, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,964 WARN [PEWorker-3 {}] procedure.DeleteTableProcedure(370): Deleting some vestigial 2 rows of testtb-testExportFileSystemStateWithSkipTmp from hbase:meta 2024-11-28T00:32:14,966 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(407): Removing 'testtb-testExportFileSystemStateWithSkipTmp' descriptor. 2024-11-28T00:32:14,967 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(126): Deleting assignment state for pid=251, state=RUNNABLE:DELETE_TABLE_UNASSIGN_REGIONS, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,967 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(397): Removing 'testtb-testExportFileSystemStateWithSkipTmp' from region states. 
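Editorial illustration (not part of the captured log): the procedure chain above (DisableTableProcedure, CloseTableRegionsProcedure with TransitRegionStateProcedure/CloseRegionProcedure children, then DeleteTableProcedure with HFileArchiver moving the region and mob files into the archive directory) is what the master executes when a client drops the table. The equivalent client-side calls are sketched below; connection details are assumed, and the table name matches the log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class DropTableSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("testtb-testExportFileSystemStateWithSkipTmp");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.disableTable(table); // master runs DisableTableProcedure: regions are unassigned and closed
      admin.deleteTable(table);  // master runs DeleteTableProcedure: files archived, meta rows and ACL entries removed
    }
  }
}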
2024-11-28T00:32:14,967 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753934967"}]},"ts":"9223372036854775807"} 2024-11-28T00:32:14,968 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.","families":{"info":[{"qualifier":"","vlen":0,"tag":[],"timestamp":"1732753934967"}]},"ts":"9223372036854775807"} 2024-11-28T00:32:14,969 INFO [PEWorker-3 {}] assignment.RegionStateStore(562): Deleted 2 regions from META 2024-11-28T00:32:14,969 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(563): Deleted regions: [{ENCODED => ae9a96d161250eea1514d4fe40999a5a, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,,1732753912638.ae9a96d161250eea1514d4fe40999a5a.', STARTKEY => '', ENDKEY => '1'}, {ENCODED => 8ba3ca265e3afad3d2e97b99b7b3f70c, NAME => 'testtb-testExportFileSystemStateWithSkipTmp,1,1732753912638.8ba3ca265e3afad3d2e97b99b7b3f70c.', STARTKEY => '1', ENDKEY => ''}] 2024-11-28T00:32:14,969 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(401): Marking 'testtb-testExportFileSystemStateWithSkipTmp' as deleted. 2024-11-28T00:32:14,969 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Delete {"totalColumns":1,"row":"testtb-testExportFileSystemStateWithSkipTmp","families":{"table":[{"qualifier":"state","vlen":0,"tag":[],"timestamp":"1732753934969"}]},"ts":"9223372036854775807"} 2024-11-28T00:32:14,971 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(867): Deleted table testtb-testExportFileSystemStateWithSkipTmp state from META 2024-11-28T00:32:14,972 DEBUG [PEWorker-3 {}] procedure.DeleteTableProcedure(133): Finished pid=251, state=RUNNABLE:DELETE_TABLE_POST_OPERATION, hasLock=true; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:14,973 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=251, state=SUCCESS, hasLock=false; DeleteTableProcedure table=testtb-testExportFileSystemStateWithSkipTmp in 30 msec 2024-11-28T00:32:15,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,128 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from 
testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-28T00:32:15,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-28T00:32:15,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-28T00:32:15,129 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testtb-testExportFileSystemStateWithSkipTmp with data PBUF 2024-11-28T00:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/acl/testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:32:15,235 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/acl 2024-11-28T00:32:15,237 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=251 2024-11-28T00:32:15,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:32:15,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 
\x03 \x04 2024-11-28T00:32:15,237 DEBUG [RPCClient-NioEventLoopGroup-6-4 {}] client.AsyncRegionLocator(219): Clear meta cache for testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:32:15,237 DEBUG [zk-permission-watcher-pool-0 {}] access.ZKPermissionWatcher(245): Updating permissions cache from testExportExpiredSnapshot with data PBUF\x0A?\x0A\x07jenkins\x124\x08\x03"0\x0A$\x0A\x07default\x12\x19testExportExpiredSnapshot \x00 \x01 \x02 \x03 \x04 2024-11-28T00:32:15,237 INFO [RPCClient-NioEventLoopGroup-6-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: DELETE, Table Name: default:testtb-testExportFileSystemStateWithSkipTmp completed 2024-11-28T00:32:15,242 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "emptySnaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-28T00:32:15,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: emptySnaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,245 INFO [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] master.MasterRpcServices(838): Client=jenkins//172.17.0.2 delete name: "snaptb0-testExportFileSystemStateWithSkipTmp" type: DISABLED 2024-11-28T00:32:15,246 DEBUG [RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673 {}] snapshot.SnapshotManager(381): Deleting snapshot: snaptb0-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:15,266 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: snapshot.TestMobSecureExportSnapshot#testExportFileSystemStateWithSkipTmp Thread=817 (was 805) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:43832 [Waiting for operation #3] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RSProcedureDispatcher-pool-8 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Thread-9827 java.base@17.0.11/java.io.FileInputStream.readBytes(Native Method) java.base@17.0.11/java.io.FileInputStream.read(FileInputStream.java:276) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:282) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) java.base@17.0.11/sun.nio.cs.StreamDecoder.readBytes(StreamDecoder.java:281) java.base@17.0.11/sun.nio.cs.StreamDecoder.implRead(StreamDecoder.java:324) java.base@17.0.11/sun.nio.cs.StreamDecoder.read(StreamDecoder.java:189) java.base@17.0.11/java.io.InputStreamReader.read(InputStreamReader.java:177) java.base@17.0.11/java.io.BufferedReader.fill(BufferedReader.java:162) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:329) java.base@17.0.11/java.io.BufferedReader.readLine(BufferedReader.java:396) app//org.apache.hadoop.util.Shell$1.run(Shell.java:1025) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-23 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45305 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: process reaper (pid 25693) java.base@17.0.11/java.lang.ProcessHandleImpl.waitForProcessExit0(Native Method) java.base@17.0.11/java.lang.ProcessHandleImpl$1.run(ProcessHandleImpl.java:150) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-27 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:47798 [Waiting for operation #5] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-20 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-519179657_22 at /127.0.0.1:35048 [Waiting for operation #4] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1065665691) connection to localhost/127.0.0.1:45305 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1714490388_1 at /127.0.0.1:47776 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-21 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ProcedureExecutor-Async-Task-Executor-22 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: DataXceiver for client DFSClient_NONMAPREDUCE_-1714490388_1 at /127.0.0.1:43822 [Waiting for operation #2] java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:335) app//org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) app//org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:263) java.base@17.0.11/java.io.DataInputStream.readUnsignedShort(DataInputStream.java:334) java.base@17.0.11/java.io.DataInputStream.readShort(DataInputStream.java:312) app//org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.readOp(Receiver.java:72) app//org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:273) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ApplicationMasterLauncher #19 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HFileArchiver-26 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=793 (was 788) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=979 (was 1034), ProcessCount=18 (was 21), AvailableMemoryMB=2593 (was 2340) - AvailableMemoryMB LEAK? - 2024-11-28T00:32:15,266 WARN [Time-limited test {}] hbase.ResourceChecker(130): Thread=817 is superior to 500 2024-11-28T00:32:15,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2377): Stopping mini mapreduce cluster... 2024-11-28T00:32:15,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1a21ff78{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-28T00:32:15,276 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e09654e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:32:15,276 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:32:15,276 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d2a247c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-28T00:32:15,276 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13781459{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,STOPPED} 2024-11-28T00:32:15,392 WARN [ContainersLauncher #0 {}] nodemanager.DefaultContainerExecutor(360): Exit code from container container_1732753513471_0011_01_000001 is : 143 2024-11-28T00:32:15,420 WARN [ContainersLauncher #3 {}] nodemanager.DefaultContainerExecutor(697): delete returned false for path: [/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test/data/MiniMRCluster_1596367906/yarn-998508043/MiniMRCluster_1596367906-localDir-nm-1_2/usercache/jenkins/appcache/application_1732753513471_0011/container_1732753513471_0011_01_000001/sysfs] 2024-11-28T00:32:15,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testtb-testExportFileSystemStateWithSkipTmp 2024-11-28T00:32:20,307 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried 
hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:32:24,203 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-28T00:32:24,203 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=87.77 KB heapSize=138.60 KB 2024-11-28T00:32:24,229 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/info/5ed67744a9044443a8af1dd32c594f6b is 173, key is testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841./info:regioninfo/1732753813602/Put/seqid=0 2024-11-28T00:32:24,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742469_1645 (size=15646) 2024-11-28T00:32:24,234 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742469_1645 (size=15646) 2024-11-28T00:32:24,235 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742469_1645 (size=15646) 2024-11-28T00:32:24,235 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74.76 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/info/5ed67744a9044443a8af1dd32c594f6b 2024-11-28T00:32:24,254 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/ns/666aea88b9c04ea1bb249d7790956c6c is 124, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d./ns:/1732753811277/DeleteFamily/seqid=0 2024-11-28T00:32:24,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742470_1646 (size=8378) 2024-11-28T00:32:24,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742470_1646 (size=8378) 2024-11-28T00:32:24,259 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742470_1646 (size=8378) 2024-11-28T00:32:24,260 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.79 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/ns/666aea88b9c04ea1bb249d7790956c6c 2024-11-28T00:32:24,279 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/rep_barrier/2543ce1b157944ccaf2106f06789366d is 133, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d./rep_barrier:/1732753811277/DeleteFamily/seqid=0 2024-11-28T00:32:24,283 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742471_1647 (size=8717) 2024-11-28T00:32:24,283 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742471_1647 (size=8717) 2024-11-28T00:32:24,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742471_1647 (size=8717) 2024-11-28T00:32:24,284 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.95 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/rep_barrier/2543ce1b157944ccaf2106f06789366d 2024-11-28T00:32:24,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/table/40f6adfe1ba840849e6ee299559e36db is 127, key is testtb-testExportFileSystemStateWithMergeRegion-1,,1732753787220.9862736ad6a4a00d92d45d3e32dfbb1d./table:/1732753811277/DeleteFamily/seqid=0 2024-11-28T00:32:24,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742472_1648 (size=9531) 2024-11-28T00:32:24,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742472_1648 (size=9531) 2024-11-28T00:32:24,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742472_1648 (size=9531) 2024-11-28T00:32:24,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.27 KB at sequenceid=240 (bloomFilter=true), to=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/table/40f6adfe1ba840849e6ee299559e36db 2024-11-28T00:32:24,314 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/info/5ed67744a9044443a8af1dd32c594f6b as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/info/5ed67744a9044443a8af1dd32c594f6b 2024-11-28T00:32:24,317 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/info/5ed67744a9044443a8af1dd32c594f6b, entries=84, sequenceid=240, filesize=15.3 K 2024-11-28T00:32:24,318 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/ns/666aea88b9c04ea1bb249d7790956c6c as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/ns/666aea88b9c04ea1bb249d7790956c6c 2024-11-28T00:32:24,321 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/ns/666aea88b9c04ea1bb249d7790956c6c, entries=28, sequenceid=240, filesize=8.2 K 2024-11-28T00:32:24,322 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/rep_barrier/2543ce1b157944ccaf2106f06789366d as 
hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/rep_barrier/2543ce1b157944ccaf2106f06789366d 2024-11-28T00:32:24,325 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/rep_barrier/2543ce1b157944ccaf2106f06789366d, entries=26, sequenceid=240, filesize=8.5 K 2024-11-28T00:32:24,326 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/.tmp/table/40f6adfe1ba840849e6ee299559e36db as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/table/40f6adfe1ba840849e6ee299559e36db 2024-11-28T00:32:24,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/table/40f6adfe1ba840849e6ee299559e36db, entries=43, sequenceid=240, filesize=9.3 K 2024-11-28T00:32:24,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~87.77 KB/89879, heapSize ~138.54 KB/141864, currentSize=0 B/0 for 1588230740 in 128ms, sequenceid=240, compaction requested=false 2024-11-28T00:32:24,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-28T00:32:28,582 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 76fb4953fe949fdbfff19f1d73203841, had cached 0 bytes from a total of 14663 2024-11-28T00:32:28,582 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 3790779e6552562e94070478b4a3d9dc, had cached 0 bytes from a total of 5890 2024-11-28T00:32:32,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5b49ba0a{node,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/node} 2024-11-28T00:32:32,290 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3318f13{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:32:32,290 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:32:32,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6caa23cd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-28T00:32:32,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77fc9ec2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,STOPPED} 2024-11-28T00:32:33,957 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-28T00:32:38,864 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 42272 2024-11-28T00:32:39,958 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 91195ca5c70c54af0f84245ffc432b72, had cached 0 bytes from a total of 5791 2024-11-28T00:32:49,298 ERROR [Thread[Thread-389,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-28T00:32:49,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1548d9d4{cluster,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/cluster} 2024-11-28T00:32:49,299 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7ab73e22{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:32:49,299 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:32:49,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ca85fc7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-28T00:32:49,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@295a1b72{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,STOPPED} 2024-11-28T00:32:49,303 WARN [ApplicationMaster Launcher {}] amlauncher.ApplicationMasterLauncher$LauncherThread(122): org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher$LauncherThread interrupted. Returning. 
2024-11-28T00:32:49,308 ERROR [SchedulerEventDispatcher:Event Processor {}] event.EventDispatcher$EventProcessor(72): Returning, interrupted : java.lang.InterruptedException 2024-11-28T00:32:49,309 ERROR [ResourceManager Event Processor Monitor {}] resourcemanager.ResourceManager$SchedulerEventDispatcher$EventProcessorMonitor(1193): Returning, interrupted : java.lang.InterruptedException: sleep interrupted 2024-11-28T00:32:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741830_1006 (size=1174175) 2024-11-28T00:32:49,312 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741830_1006 (size=1174175) 2024-11-28T00:32:49,316 ERROR [Thread[Thread-416,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-28T00:32:49,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@648e364d{jobhistory,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/jobhistory} 2024-11-28T00:32:49,320 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40399fe0{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:32:49,320 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:32:49,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@126eaf28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-yarn-common/3.4.1/hadoop-yarn-common-3.4.1.jar!/webapps/static,STOPPED} 2024-11-28T00:32:49,320 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46180687{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,STOPPED} 2024-11-28T00:32:49,321 ERROR [Thread[Thread-371,5,FailOnTimeoutGroup] {}] delegation.AbstractDelegationTokenSecretManager$ExpiredTokenRemover(852): ExpiredTokenRemover received java.lang.InterruptedException: sleep interrupted 2024-11-28T00:32:49,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2380): Mini mapreduce cluster stopped 2024-11-28T00:32:49,322 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-28T00:32:49,322 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-28T00:32:49,322 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:32:49,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,322 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,322 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-28T00:32:49,322 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-28T00:32:49,322 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=806974873, stopped=false 2024-11-28T00:32:49,323 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:49,323 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-28T00:32:49,323 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=72c8c7f04009,39673,1732753505055 2024-11-28T00:32:49,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T00:32:49,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T00:32:49,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:32:49,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:32:49,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T00:32:49,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:32:49,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-28T00:32:49,325 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-28T00:32:49,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:32:49,326 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:32:49,326 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:32:49,326 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:32:49,326 DEBUG 
[zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-28T00:32:49,326 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-28T00:32:49,327 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:32:49,327 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,327 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '72c8c7f04009,43335,1732753506004' ***** 2024-11-28T00:32:49,327 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:49,327 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-28T00:32:49,327 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '72c8c7f04009,41621,1732753506207' ***** 2024-11-28T00:32:49,327 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor 
org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:49,327 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-28T00:32:49,327 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '72c8c7f04009,36427,1732753506332' ***** 2024-11-28T00:32:49,328 DEBUG [Time-limited test {}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:49,328 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-28T00:32:49,328 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-28T00:32:49,328 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-28T00:32:49,328 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-28T00:32:49,328 INFO [RS:1;72c8c7f04009:41621 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-28T00:32:49,328 INFO [RS:0;72c8c7f04009:43335 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-28T00:32:49,328 INFO [RS:2;72c8c7f04009:36427 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-28T00:32:49,328 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-28T00:32:49,328 INFO [RS:1;72c8c7f04009:41621 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-28T00:32:49,328 INFO [RS:0;72c8c7f04009:43335 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-28T00:32:49,328 INFO [RS:2;72c8c7f04009:36427 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 
2024-11-28T00:32:49,328 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(3091): Received CLOSE for 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:32:49,328 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(3091): Received CLOSE for 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:32:49,328 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(3091): Received CLOSE for 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:32:49,328 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-28T00:32:49,328 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-28T00:32:49,329 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(959): stopping server 72c8c7f04009,41621,1732753506207 2024-11-28T00:32:49,329 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(959): stopping server 72c8c7f04009,43335,1732753506004 2024-11-28T00:32:49,329 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(959): stopping server 72c8c7f04009,36427,1732753506332 2024-11-28T00:32:49,329 INFO [RS:1;72c8c7f04009:41621 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-28T00:32:49,329 INFO [RS:0;72c8c7f04009:43335 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-28T00:32:49,329 INFO [RS:2;72c8c7f04009:36427 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-28T00:32:49,329 INFO [RS:1;72c8c7f04009:41621 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;72c8c7f04009:41621. 2024-11-28T00:32:49,329 INFO [RS:2;72c8c7f04009:36427 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:2;72c8c7f04009:36427. 2024-11-28T00:32:49,329 INFO [RS:0;72c8c7f04009:43335 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;72c8c7f04009:43335. 
2024-11-28T00:32:49,329 DEBUG [RS:1;72c8c7f04009:41621 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:32:49,329 DEBUG [RS:1;72c8c7f04009:41621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,329 DEBUG [RS:2;72c8c7f04009:36427 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:32:49,329 DEBUG [RS:0;72c8c7f04009:43335 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at 
org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:32:49,329 DEBUG [RS:2;72c8c7f04009:36427 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,329 DEBUG [RS:0;72c8c7f04009:43335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,329 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-28T00:32:49,329 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-28T00:32:49,329 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-28T00:32:49,329 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-28T00:32:49,329 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1325): Online Regions={3790779e6552562e94070478b4a3d9dc=testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc.} 2024-11-28T00:32:49,329 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1325): Online Regions={91195ca5c70c54af0f84245ffc432b72=hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72.} 2024-11-28T00:32:49,329 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 91195ca5c70c54af0f84245ffc432b72, disabling compactions & flushes 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3790779e6552562e94070478b4a3d9dc, disabling compactions & flushes 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 76fb4953fe949fdbfff19f1d73203841, disabling compactions & flushes 2024-11-28T00:32:49,329 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:32:49,329 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-28T00:32:49,329 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:32:49,329 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 
2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. after waiting 0 ms 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. after waiting 0 ms 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. after waiting 0 ms 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:32:49,329 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 
2024-11-28T00:32:49,329 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 91195ca5c70c54af0f84245ffc432b72 1/1 column families, dataSize=190 B heapSize=672 B 2024-11-28T00:32:49,330 DEBUG [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1351): Waiting on 3790779e6552562e94070478b4a3d9dc 2024-11-28T00:32:49,330 DEBUG [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1351): Waiting on 91195ca5c70c54af0f84245ffc432b72 2024-11-28T00:32:49,338 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-28T00:32:49,338 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 76fb4953fe949fdbfff19f1d73203841=testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841.} 2024-11-28T00:32:49,338 DEBUG [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 76fb4953fe949fdbfff19f1d73203841 2024-11-28T00:32:49,338 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-28T00:32:49,338 INFO [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-28T00:32:49,338 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-28T00:32:49,338 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-28T00:32:49,339 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-28T00:32:49,345 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/.tmp/l/05a4c7ae3ded4f3999b2ead864366367 is 68, key is testtb-testExportFileSystemStateWithSkipTmp/l:/1732753934944/DeleteFamily/seqid=0 2024-11-28T00:32:49,347 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/3790779e6552562e94070478b4a3d9dc/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:32:49,347 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:49,347 INFO [regionserver/72c8c7f04009:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:32:49,347 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 
2024-11-28T00:32:49,347 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3790779e6552562e94070478b4a3d9dc: Waiting for close lock at 1732753969329Running coprocessor pre-close hooks at 1732753969329Disabling compacts and flushes for region at 1732753969329Disabling writes for close at 1732753969329Writing region close event to WAL at 1732753969338 (+9 ms)Running coprocessor post-close hooks at 1732753969347 (+9 ms)Closed at 1732753969347 2024-11-28T00:32:49,348 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,,1732753813232.3790779e6552562e94070478b4a3d9dc. 2024-11-28T00:32:49,352 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/meta/1588230740/recovered.edits/243.seqid, newMaxSeqId=243, maxSeqId=1 2024-11-28T00:32:49,352 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/default/testExportExpiredSnapshot/76fb4953fe949fdbfff19f1d73203841/recovered.edits/8.seqid, newMaxSeqId=8, maxSeqId=1 2024-11-28T00:32:49,353 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:49,353 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:49,353 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T00:32:49,353 INFO [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-28T00:32:49,353 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 
2024-11-28T00:32:49,353 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732753969338Running coprocessor pre-close hooks at 1732753969338Disabling compacts and flushes for region at 1732753969338Disabling writes for close at 1732753969338Writing region close event to WAL at 1732753969346 (+8 ms)Running coprocessor post-close hooks at 1732753969353 (+7 ms)Closed at 1732753969353 2024-11-28T00:32:49,353 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 76fb4953fe949fdbfff19f1d73203841: Waiting for close lock at 1732753969329Running coprocessor pre-close hooks at 1732753969329Disabling compacts and flushes for region at 1732753969329Disabling writes for close at 1732753969329Writing region close event to WAL at 1732753969339 (+10 ms)Running coprocessor post-close hooks at 1732753969353 (+14 ms)Closed at 1732753969353 2024-11-28T00:32:49,353 DEBUG [RS_CLOSE_META-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-28T00:32:49,353 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed testExportExpiredSnapshot,1,1732753813232.76fb4953fe949fdbfff19f1d73203841. 2024-11-28T00:32:49,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742473_1649 (size=5142) 2024-11-28T00:32:49,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742473_1649 (size=5142) 2024-11-28T00:32:49,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742473_1649 (size=5142) 2024-11-28T00:32:49,355 INFO [regionserver/72c8c7f04009:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:32:49,356 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=190 B at sequenceid=34 (bloomFilter=false), to=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/.tmp/l/05a4c7ae3ded4f3999b2ead864366367 2024-11-28T00:32:49,360 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 05a4c7ae3ded4f3999b2ead864366367 2024-11-28T00:32:49,361 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/.tmp/l/05a4c7ae3ded4f3999b2ead864366367 as hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/l/05a4c7ae3ded4f3999b2ead864366367 2024-11-28T00:32:49,365 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 05a4c7ae3ded4f3999b2ead864366367 2024-11-28T00:32:49,365 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/l/05a4c7ae3ded4f3999b2ead864366367, entries=2, sequenceid=34, filesize=5.0 K 2024-11-28T00:32:49,366 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 91195ca5c70c54af0f84245ffc432b72 in 37ms, sequenceid=34, compaction requested=false 2024-11-28T00:32:49,369 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/data/hbase/acl/91195ca5c70c54af0f84245ffc432b72/recovered.edits/37.seqid, newMaxSeqId=37, maxSeqId=1 2024-11-28T00:32:49,370 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:49,370 INFO [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:32:49,370 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 91195ca5c70c54af0f84245ffc432b72: Waiting for close lock at 1732753969329Running coprocessor pre-close hooks at 1732753969329Disabling compacts and flushes for region at 1732753969329Disabling writes for close at 1732753969329Obtaining lock to block concurrent updates at 1732753969329Preparing flush snapshotting stores in 91195ca5c70c54af0f84245ffc432b72 at 1732753969329Finished memstore snapshotting hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72., syncing WAL and waiting on mvcc, flushsize=dataSize=190, getHeapSize=656, getOffHeapSize=0, getCellsCount=3 at 1732753969330 (+1 ms)Flushing stores of hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. at 1732753969330Flushing 91195ca5c70c54af0f84245ffc432b72/l: creating writer at 1732753969330Flushing 91195ca5c70c54af0f84245ffc432b72/l: appending metadata at 1732753969345 (+15 ms)Flushing 91195ca5c70c54af0f84245ffc432b72/l: closing flushed file at 1732753969345Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7decc365: reopening flushed file at 1732753969360 (+15 ms)Finished flush of dataSize ~190 B/190, heapSize ~656 B/656, currentSize=0 B/0 for 91195ca5c70c54af0f84245ffc432b72 in 37ms, sequenceid=34, compaction requested=false at 1732753969366 (+6 ms)Writing region close event to WAL at 1732753969367 (+1 ms)Running coprocessor post-close hooks at 1732753969370 (+3 ms)Closed at 1732753969370 2024-11-28T00:32:49,370 DEBUG [RS_CLOSE_REGION-regionserver/72c8c7f04009:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed hbase:acl,,1732753509236.91195ca5c70c54af0f84245ffc432b72. 2024-11-28T00:32:49,397 INFO [regionserver/72c8c7f04009:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:32:49,530 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(976): stopping server 72c8c7f04009,43335,1732753506004; all regions closed. 2024-11-28T00:32:49,530 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(976): stopping server 72c8c7f04009,41621,1732753506207; all regions closed. 
2024-11-28T00:32:49,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741834_1010 (size=17981) 2024-11-28T00:32:49,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741834_1010 (size=17981) 2024-11-28T00:32:49,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741835_1011 (size=12667) 2024-11-28T00:32:49,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741835_1011 (size=12667) 2024-11-28T00:32:49,537 DEBUG [RS:0;72c8c7f04009:43335 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/oldWALs 2024-11-28T00:32:49,537 DEBUG [RS:1;72c8c7f04009:41621 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/oldWALs 2024-11-28T00:32:49,537 INFO [RS:0;72c8c7f04009:43335 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 72c8c7f04009%2C43335%2C1732753506004:(num 1732753508405) 2024-11-28T00:32:49,537 INFO [RS:1;72c8c7f04009:41621 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 72c8c7f04009%2C41621%2C1732753506207:(num 1732753508407) 2024-11-28T00:32:49,537 DEBUG [RS:1;72c8c7f04009:41621 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,537 DEBUG [RS:0;72c8c7f04009:43335 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,537 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:32:49,537 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:32:49,537 INFO [RS:0;72c8c7f04009:43335 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-28T00:32:49,537 INFO [RS:1;72c8c7f04009:41621 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-28T00:32:49,538 INFO [RS:1;72c8c7f04009:41621 {}] hbase.ChoreService(370): Chore service for: regionserver/72c8c7f04009:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-28T00:32:49,538 INFO [RS:0;72c8c7f04009:43335 {}] hbase.ChoreService(370): Chore service for: regionserver/72c8c7f04009:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-28T00:32:49,538 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-28T00:32:49,538 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-28T00:32:49,538 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-28T00:32:49,538 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-28T00:32:49,538 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-28T00:32:49,538 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-28T00:32:49,538 INFO [RS:1;72c8c7f04009:41621 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-28T00:32:49,538 INFO [RS:0;72c8c7f04009:43335 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-28T00:32:49,538 INFO [regionserver/72c8c7f04009:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-28T00:32:49,538 INFO [regionserver/72c8c7f04009:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-28T00:32:49,538 INFO [RS:0;72c8c7f04009:43335 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43335 2024-11-28T00:32:49,538 INFO [RS:1;72c8c7f04009:41621 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41621 2024-11-28T00:32:49,538 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(976): stopping server 72c8c7f04009,36427,1732753506332; all regions closed. 2024-11-28T00:32:49,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741836_1012 (size=102297) 2024-11-28T00:32:49,541 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741836_1012 (size=102297) 2024-11-28T00:32:49,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/72c8c7f04009,41621,1732753506207 2024-11-28T00:32:49,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T00:32:49,543 INFO [RS:1;72c8c7f04009:41621 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-28T00:32:49,544 DEBUG [RS:2;72c8c7f04009:36427 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/oldWALs 2024-11-28T00:32:49,544 INFO [RS:2;72c8c7f04009:36427 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 72c8c7f04009%2C36427%2C1732753506332.meta:.meta(num 1732753508822) 2024-11-28T00:32:49,544 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/72c8c7f04009,43335,1732753506004 2024-11-28T00:32:49,545 INFO [RS:0;72c8c7f04009:43335 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-28T00:32:49,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073741833_1009 (size=13732) 2024-11-28T00:32:49,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073741833_1009 (size=13732) 2024-11-28T00:32:49,546 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741833_1009 (size=13732) 2024-11-28T00:32:49,546 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [72c8c7f04009,43335,1732753506004] 2024-11-28T00:32:49,547 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node 
/hbase/draining/72c8c7f04009,43335,1732753506004 already deleted, retry=false 2024-11-28T00:32:49,547 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 72c8c7f04009,43335,1732753506004 expired; onlineServers=2 2024-11-28T00:32:49,547 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [72c8c7f04009,41621,1732753506207] 2024-11-28T00:32:49,548 DEBUG [RS:2;72c8c7f04009:36427 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/oldWALs 2024-11-28T00:32:49,548 INFO [RS:2;72c8c7f04009:36427 {}] wal.AbstractFSWAL(1259): Closed WAL: AsyncFSWAL 72c8c7f04009%2C36427%2C1732753506332:(num 1732753508405) 2024-11-28T00:32:49,548 DEBUG [RS:2;72c8c7f04009:36427 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-28T00:32:49,548 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.LeaseManager(133): Closed leases 2024-11-28T00:32:49,548 INFO [RS:2;72c8c7f04009:36427 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-28T00:32:49,549 INFO [RS:2;72c8c7f04009:36427 {}] hbase.ChoreService(370): Chore service for: regionserver/72c8c7f04009:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-28T00:32:49,549 INFO [RS:2;72c8c7f04009:36427 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-28T00:32:49,549 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/72c8c7f04009,41621,1732753506207 already deleted, retry=false 2024-11-28T00:32:49,549 INFO [regionserver/72c8c7f04009:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-28T00:32:49,549 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 72c8c7f04009,41621,1732753506207 expired; onlineServers=1 2024-11-28T00:32:49,549 INFO [RS:2;72c8c7f04009:36427 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36427 2024-11-28T00:32:49,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/72c8c7f04009,36427,1732753506332 2024-11-28T00:32:49,551 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-28T00:32:49,551 INFO [RS:2;72c8c7f04009:36427 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-28T00:32:49,552 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [72c8c7f04009,36427,1732753506332] 2024-11-28T00:32:49,553 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/72c8c7f04009,36427,1732753506332 already deleted, retry=false 2024-11-28T00:32:49,553 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 72c8c7f04009,36427,1732753506332 expired; onlineServers=0 2024-11-28T00:32:49,553 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '72c8c7f04009,39673,1732753505055' ***** 2024-11-28T00:32:49,553 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-28T00:32:49,553 INFO [M:0;72c8c7f04009:39673 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-28T00:32:49,553 INFO [M:0;72c8c7f04009:39673 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-28T00:32:49,554 DEBUG [M:0;72c8c7f04009:39673 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-28T00:32:49,554 DEBUG [M:0;72c8c7f04009:39673 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-28T00:32:49,554 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-28T00:32:49,554 DEBUG [master/72c8c7f04009:0:becomeActiveMaster-HFileCleaner.large.0-1732753507802 {}] cleaner.HFileCleaner(306): Exit Thread[master/72c8c7f04009:0:becomeActiveMaster-HFileCleaner.large.0-1732753507802,5,FailOnTimeoutGroup] 2024-11-28T00:32:49,554 DEBUG [master/72c8c7f04009:0:becomeActiveMaster-HFileCleaner.small.0-1732753507820 {}] cleaner.HFileCleaner(306): Exit Thread[master/72c8c7f04009:0:becomeActiveMaster-HFileCleaner.small.0-1732753507820,5,FailOnTimeoutGroup] 2024-11-28T00:32:49,554 INFO [M:0;72c8c7f04009:39673 {}] hbase.ChoreService(370): Chore service for: master/72c8c7f04009:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-28T00:32:49,554 INFO [M:0;72c8c7f04009:39673 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-28T00:32:49,554 DEBUG [M:0;72c8c7f04009:39673 {}] master.HMaster(1795): Stopping service threads 2024-11-28T00:32:49,554 INFO [M:0;72c8c7f04009:39673 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-28T00:32:49,554 INFO [M:0;72c8c7f04009:39673 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-28T00:32:49,555 INFO [M:0;72c8c7f04009:39673 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-28T00:32:49,555 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-28T00:32:49,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-28T00:32:49,555 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-28T00:32:49,556 DEBUG [M:0;72c8c7f04009:39673 {}] zookeeper.ZKUtil(347): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-28T00:32:49,556 WARN [M:0;72c8c7f04009:39673 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-28T00:32:49,557 INFO [M:0;72c8c7f04009:39673 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/.lastflushedseqids 2024-11-28T00:32:49,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43159 is added to blk_1073742474_1650 (size=307) 2024-11-28T00:32:49,570 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073742474_1650 (size=307) 2024-11-28T00:32:49,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45909 is added to blk_1073742474_1650 (size=307) 2024-11-28T00:32:49,571 INFO [M:0;72c8c7f04009:39673 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-28T00:32:49,571 INFO [M:0;72c8c7f04009:39673 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-28T00:32:49,571 DEBUG 
[M:0;72c8c7f04009:39673 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-28T00:32:49,592 INFO [M:0;72c8c7f04009:39673 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:32:49,592 DEBUG [M:0;72c8c7f04009:39673 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:32:49,592 DEBUG [M:0;72c8c7f04009:39673 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-28T00:32:49,592 DEBUG [M:0;72c8c7f04009:39673 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-28T00:32:49,592 INFO [M:0;72c8c7f04009:39673 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=998.95 KB heapSize=1.17 MB 2024-11-28T00:32:49,593 ERROR [AsyncFSWAL-0-hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData-prefix:72c8c7f04009,39673,1732753505055 {}] server.NIOServerCnxnFactory(85): Thread Thread[AsyncFSWAL-0-hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData-prefix:72c8c7f04009,39673,1732753505055,5,FailOnTimeoutGroup] died java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.readableBytes()" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.buffered(FanOutOneBlockAsyncDFSOutput.java:414) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.append(AsyncProtobufLogWriter.java:134) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:181) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncFSWAL.doAppend(AsyncFSWAL.java:100) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendEntry(AbstractFSWAL.java:1333) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.appendAndSync(AbstractFSWAL.java:1724) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.consume(AbstractFSWAL.java:1832) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-28T00:32:49,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:32:49,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:32:49,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43335-0x1003b83f01b0001, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:32:49,647 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41621-0x1003b83f01b0002, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:32:49,647 INFO [RS:1;72c8c7f04009:41621 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-28T00:32:49,647 INFO [RS:0;72c8c7f04009:43335 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-28T00:32:49,648 INFO [RS:0;72c8c7f04009:43335 {}] regionserver.HRegionServer(1031): Exiting; stopping=72c8c7f04009,43335,1732753506004; zookeeper connection closed. 2024-11-28T00:32:49,648 INFO [RS:1;72c8c7f04009:41621 {}] regionserver.HRegionServer(1031): Exiting; stopping=72c8c7f04009,41621,1732753506207; zookeeper connection closed. 2024-11-28T00:32:49,648 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@618a790b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@618a790b 2024-11-28T00:32:49,648 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@5e0a862d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@5e0a862d 2024-11-28T00:32:49,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:32:49,652 INFO [RS:2;72c8c7f04009:36427 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-28T00:32:49,652 INFO [RS:2;72c8c7f04009:36427 {}] regionserver.HRegionServer(1031): Exiting; stopping=72c8c7f04009,36427,1732753506332; zookeeper connection closed. 
2024-11-28T00:32:49,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36427-0x1003b83f01b0003, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:32:49,652 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@3c402904 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@3c402904 2024-11-28T00:32:49,653 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 3 regionserver(s) complete 2024-11-28T00:32:51,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741834_1010 (size=17981) 2024-11-28T00:32:51,397 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741836_1012 (size=102297) 2024-11-28T00:32:51,415 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741835_1011 (size=12667) 2024-11-28T00:32:51,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39705 is added to blk_1073741830_1006 (size=1174175) 2024-11-28T00:32:54,863 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:32:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.RegionServer.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-28T00:32:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-28T00:32:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_testExportExpiredSnapshot 2024-11-28T00:32:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_acl 2024-11-28T00:32:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:32:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: Master,sub=Coprocessor.Master.CP_org.apache.hadoop.hbase.security.access.SecureTestUtil$MasterSyncObserver 2024-11-28T00:32:55,541 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.security.access.AccessController 2024-11-28T00:33:01,043 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:33:03,957 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): 
NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:33:33,958 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;72c8c7f04009:39673 240 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@61fb92fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 26 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 33 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 22 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16a9278f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5315 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 54 Waiting on java.util.concurrent.CountDownLatch$Sync@1ad42664 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) 
    app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127)
    java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244)
    java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284)
    java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343)
    app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169)
    app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419)
    app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116)
    app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77)
    app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60)
    app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 22 (Time-limited test):
  State: RUNNABLE
  Blocked count: 11889
  Waited count: 12726
  Stack:
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197)
    java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154)
    app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181)
    app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186)
    app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113)
    app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396)
    app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886)
    app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038)
    app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568)
    app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner):
  State: WAITING
  Blocked count: 16
  Waited count: 16
  Waiting on java.lang.ref.ReferenceQueue$Lock@38563cd2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155)
    java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176)
    app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 25 (SSL Certificates Store Monitor):
  State: WAITING
  Blocked count: 0
  Waited count: 1
  Waiting on java.util.TaskQueue@671ce108
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.lang.Object.wait(Object.java:338)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1057
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 106
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161)
Thread 36 (pool-6-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 37 (qtp1861387285-37):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 38 (qtp1861387285-38):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 39 (qtp1861387285-39):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 40 (qtp1861387285-40):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:35791}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 42 (qtp1861387285-42):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 43 (qtp1861387285-43):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 44 (qtp1861387285-44):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 45 (Session-HouseKeeper-61abf97f-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 46 (pool-7-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 53 (FSEditLogAsync):
  State: WAITING
  Blocked count: 45
  Waited count: 3634
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@98fcdf1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241)
    app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 55 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 1
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 56 (IPC Server idle connection scanner for port 43901):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 54
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 58 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 106
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 62 (DatanodeAdminMonitor-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 178
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 106
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 47 (RedundancyMonitor):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 176
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344)
    java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 48 (MarkedDeleteBlockScrubberThread):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 51930
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 51 (Block report processor):
  State: WAITING
  Blocked count: 1
  Waited count: 1783
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5434b52f
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627)
    app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614)
Thread 57 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 54 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 64 (IPC Server handler 0 on default port 43901):
  State: TIMED_WAITING
  Blocked count: 68
  Waited count: 2698
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 65 (IPC Server handler 1 on default port 43901):
  State: TIMED_WAITING
  Blocked count: 83
  Waited count: 2703
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 66 (IPC Server handler 2 on default port 43901):
  State: TIMED_WAITING
  Blocked count: 69
  Waited count: 2713
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 67 (IPC Server handler 3 on default port 43901):
  State: TIMED_WAITING
  Blocked count: 59
  Waited count: 2701
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 68 (IPC Server handler 4 on default port 43901):
  State: TIMED_WAITING
  Blocked count: 66
  Waited count: 2717
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 69 (pool-12-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 264
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 106
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 3
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 75 (CacheReplicationMonitor(1367253335)):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 19
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759)
    app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186)
Thread 86 (pool-18-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 87 (qtp795976251-87):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:46399}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 89 (qtp795976251-89):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 11
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 90 (qtp795976251-90):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 10
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 91 (Session-HouseKeeper-2ee2162-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 92 (nioEventLoopGroup-2-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1054
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 95 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 96 (IPC Server idle connection scanner for port 43091):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 54
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 98 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 106
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 101 (Command processor):
  State: WAITING
  Blocked count: 3
  Waited count: 365
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@457a5abb
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 102 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901):
  State: TIMED_WAITING
  Blocked count: 1490
  Waited count: 1672
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 103 (pool-20-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 97 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 94 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 104 (IPC Server handler 0 on default port 43091):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 529
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 105 (IPC Server handler 1 on default port 43091):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 527
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 106 (IPC Server handler 2 on default port 43091):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 527
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 107 (IPC Server handler 3 on default port 43091):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 527
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 108 (IPC Server handler 4 on default port 43091):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 527
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 120 (pool-26-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 121 (qtp1421273556-121):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 115 (IPC Client (1065665691) connection to localhost/127.0.0.1:43901 from jenkins):
  State: TIMED_WAITING
  Blocked count: 1668
  Waited count: 1669
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)
Thread 122 (qtp1421273556-122-acceptor-0@515ec047-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:39963}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43901):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 2302
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 123 (qtp1421273556-123):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 124 (qtp1421273556-124):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 125 (Session-HouseKeeper-13de1d7e-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 126 (nioEventLoopGroup-4-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1054
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 129 (Socket Reader #1 for port 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497)
    app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476)
Thread 130 (IPC Server idle connection scanner for port 44599):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 54
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563)
    java.base@17.0.11/java.util.TimerThread.run(Timer.java:516)
Thread 132 (Hadoop-Metrics-Updater-0):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 106
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 135 (Command processor):
  State: WAITING
  Blocked count: 1
  Waited count: 308
  Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@246a88f2
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465)
    java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395)
Thread 137 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901):
  State: TIMED_WAITING
  Blocked count: 1488
  Waited count: 1677
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771)
    app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 138 (pool-29-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126)
    app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85)
    app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 131 (IPC Server Responder):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141)
    app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733)
    app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716)
Thread 128 (IPC Server listener on 0):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559)
Thread 139 (IPC Server handler 0 on default port 44599):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 528
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 140 (IPC Server handler 1 on default port 44599):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 527
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 141 (IPC Server handler 2 on default port 44599):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 527
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 142 (IPC Server handler 3 on default port 44599):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 529
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 143 (IPC Server handler 4 on default port 44599):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 527
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460)
    app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370)
    app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165)
Thread 155 (pool-36-thread-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 156 (qtp334392358-156):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183)
    app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606)
    app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173)
    app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137)
    app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 157 (qtp334392358-157-acceptor-0@5ede5822-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37047}):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/sun.nio.ch.Net.accept(Native Method)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425)
    java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391)
    app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388)
    app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 158 (qtp334392358-158):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 159 (qtp334392358-159):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 9
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974)
    app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 161 (Session-HouseKeeper-24bad932-1):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182)
    java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122)
    java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3)):
  State: TIMED_WAITING
  Blocked count: 2
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4)):
  State: TIMED_WAITING
  Blocked count: 1
  Waited count: 2
  Stack:
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656)
Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4/current/BP-1095226328-172.17.0.2-1732753500430):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3/current/BP-1095226328-172.17.0.2-1732753500430):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1/current/BP-1095226328-172.17.0.2-1732753500430):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2/current/BP-1095226328-172.17.0.2-1732753500430):
  State: TIMED_WAITING
  Blocked count: 0
  Waited count: 1
  Stack:
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)
Thread 188 (nioEventLoopGroup-6-1):
  State: RUNNABLE
  Blocked count: 0
  Waited count: 0
  Stack:
    java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method)
    java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129)
    java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146)
    app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68)
    app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1052 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 46339): State: TIMED_WAITING Blocked count: 1 Waited count: 54 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 192 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 106 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@4ef3d9c0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 0 Waited count: 329 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2fc1b722 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@175a0b95[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1482 Waited count: 1690 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 194 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 527 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 532 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 533 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@5bccb5c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 18 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64116): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 264 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 408 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37424b9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:64116):): State: WAITING Blocked count: 2 Waited count: 498 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8c24c5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 528 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5491ba47 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 248 (LeaseRenewer:jenkins@localhost:43901): State: TIMED_WAITING Blocked count: 15 Waited count: 548 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@27e6e636 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 427 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:64116)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) 
app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64c6304f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 100 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ca968c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 
(NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 98 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ceee830 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 
(RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 102 Waited count: 402 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d97862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 139 Waited count: 639 Waiting on java.util.concurrent.Semaphore$NonfairSync@13c86cbe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673): State: WAITING Blocked count: 104 Waited count: 9875 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5460d142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@32337978 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@624e4e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@307679d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f2983f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678c7d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 11 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;72c8c7f04009:39673): State: TIMED_WAITING Blocked count: 12 Waited count: 4632 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1095/0x00007f1eb0f96a30.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) 
app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@3f4880aa): State: TIMED_WAITING Blocked count: 0 Waited count: 175 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5219 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 74 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 22 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 160 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 53 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 52132 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e43c653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58d4e93a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4f2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3534750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 488 (LeaseRenewer:jenkins.hfs.1@localhost:43901): State: TIMED_WAITING Blocked count: 15 Waited count: 548 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 494 (LeaseRenewer:jenkins.hfs.0@localhost:43901): State: TIMED_WAITING Blocked count: 15 Waited count: 548 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 498 (LeaseRenewer:jenkins.hfs.2@localhost:43901): State: TIMED_WAITING Blocked count: 15 Waited count: 548 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 509 (region-location-0): State: WAITING Blocked count: 13 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 513 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 51957 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 541 (ForkJoinPool.commonPool-worker-1): State: TIMED_WAITING Blocked count: 0 Waited count: 816 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 542 (ForkJoinPool.commonPool-worker-2): 
State: WAITING Blocked count: 0 Waited count: 272 Waiting on java.util.concurrent.ForkJoinPool@c30d24e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 553 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 567 (region-location-1): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1147 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@737a5e04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1281 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1282 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1283 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1644 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@1389038c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1871 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1872 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2014 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2210 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 596 Waiting on java.util.concurrent.ForkJoinPool@c30d24e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2748 (ForkJoinPool.commonPool-worker-5): State: WAITING Blocked count: 0 Waited count: 637 Waiting on 
java.util.concurrent.ForkJoinPool@c30d24e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6595 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6596 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6597 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11362 (AsyncFSWAL-1-hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData-prefix:72c8c7f04009,39673,1732753505055): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19211111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11365 (java.util.concurrent.ThreadPoolExecutor$Worker@29f8e368[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11366 (java.util.concurrent.ThreadPoolExecutor$Worker@203f9bdd[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11369 (java.util.concurrent.ThreadPoolExecutor$Worker@1092f2bd[State = -1, empty queue]): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11370 (java.util.concurrent.ThreadPoolExecutor$Worker@2c41ef71[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11374 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 5 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-28T00:34:03,958 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:34:33,958 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
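The two DEBUG lines above are emitted by HBaseTestingUtil's FsDatasetAsyncDiskServiceFixer, which reflectively looks up a private field named threadGroup inside Hadoop's async disk service; on Hadoop releases newer than 3.2.3/3.3.4 that field no longer exists, so the lookup fails with NoSuchFieldException and the fixer simply logs it and moves on (see HBASE-27595). A minimal sketch of that reflective-lookup pattern, assuming nothing about the actual HBase implementation (class and method names here are illustrative):

import java.lang.reflect.Field;

public final class ThreadGroupFieldProbe {
    // Fetch a declared (possibly private) field by name and read its value.
    // If the field has been removed in a newer release of the target class,
    // getDeclaredField throws NoSuchFieldException, which is caught and
    // reported (as in the DEBUG message above) instead of failing the caller.
    static Object readDeclaredField(Object target, String fieldName) {
        try {
            Field f = target.getClass().getDeclaredField(fieldName); // e.g. "threadGroup"
            f.setAccessible(true);
            return f.get(target);
        } catch (NoSuchFieldException | IllegalAccessException e) {
            System.out.println(e.getClass().getSimpleName() + ": " + fieldName
                + "; the target class no longer declares this field");
            return null;
        }
    }
}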
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;72c8c7f04009:39673 232 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@61fb92fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 27 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 36 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 25 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16a9278f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 5914 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 60 Waiting on java.util.concurrent.CountDownLatch$Sync@4fa5b6e1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11889 Waited count: 12727 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 16 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@38563cd2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@671ce108 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1177 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:35791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3634 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@98fcdf1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43901): State: TIMED_WAITING Blocked count: 1 Waited 
count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 198 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 196 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 57893 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1783 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5434b52f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43901): State: TIMED_WAITING Blocked count: 68 Waited count: 2759 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43901): State: TIMED_WAITING Blocked count: 83 Waited count: 2765 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43901): State: TIMED_WAITING Blocked count: 69 Waited count: 2774 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43901): State: TIMED_WAITING Blocked count: 59 Waited count: 2762 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43901): State: TIMED_WAITING Blocked count: 66 Waited count: 2778 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 294 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 21 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:46399}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
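The per-thread entries above and below are produced by the harness's periodic dumper; the "Time-limited test" stack earlier in this dump (ThreadImpl.getThreadInfo -> ReflectionUtils.printThreadInfo -> Threads.threadDumpingIsAlive) shows it polling thread info while waiting for the cluster to shut down. A minimal, illustrative sketch that produces output of the same shape with the standard ThreadMXBean API, not the HBase utility itself:

import java.lang.management.ManagementFactory;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;

public final class ThreadDumpSketch {
    // One snapshot: thread id/name, state, blocked/waited counts, then stack frames,
    // mirroring the "Thread N (name): State: ... Blocked count: ... Waited count: ... Stack:" layout.
    static void dump() {
        ThreadMXBean mx = ManagementFactory.getThreadMXBean();
        for (ThreadInfo info : mx.dumpAllThreads(false, false)) {
            System.out.printf("Thread %d (%s):%n  State: %s%n  Blocked count: %d%n  Waited count: %d%n  Stack:%n",
                info.getThreadId(), info.getThreadName(), info.getThreadState(),
                info.getBlockedCount(), info.getWaitedCount());
            for (StackTraceElement frame : info.getStackTrace()) {
                System.out.println("    " + frame);
            }
        }
    }

    public static void main(String[] args) {
        // The harness repeats a snapshot like this every 60 seconds while it waits;
        // here we take a single snapshot.
        dump();
    }
}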
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 43091): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 3 Waited count: 385 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@457a5abb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1510 Waited count: 1712 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 589 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1065665691) connection to localhost/127.0.0.1:43901 from jenkins): State: TIMED_WAITING Blocked count: 1728 Waited count: 1729 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1421273556-122-acceptor-0@515ec047-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:39963}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 0 Waited count: 2362 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1174 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44599): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 328 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@246a88f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1508 Waited count: 1717 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 588 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 589 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@5ede5822-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1172 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 46339): State: TIMED_WAITING Blocked count: 1 Waited count: 60 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 192 (pool-15-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 118 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@4ef3d9c0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 0 Waited count: 349 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2fc1b722 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@175a0b95[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1502 Waited count: 1730 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 194 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 46339): 
State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 587 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 592 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 593 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@5bccb5c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 20 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64116): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 294 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 413 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37424b9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:64116):): State: WAITING Blocked count: 2 Waited count: 503 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8c24c5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 533 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5491ba47 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@27e6e636 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 467 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:64116)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64c6304f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ca968c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ceee830 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 102 Waited count: 402 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d97862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) 
java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 139 Waited count: 639 Waiting on java.util.concurrent.Semaphore$NonfairSync@13c86cbe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673): State: WAITING Blocked count: 104 Waited count: 9875 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5460d142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@32337978 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@624e4e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@307679d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f2983f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678c7d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 11 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;72c8c7f04009:39673): State: TIMED_WAITING Blocked count: 12 Waited count: 4632 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1095/0x00007f1eb0f96a30.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@3f4880aa): State: TIMED_WAITING Blocked count: 0 Waited count: 195 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 5818 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 74 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 22 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36be2c44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 59 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 58134 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e43c653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 
(regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58d4e93a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4f2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3534750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 509 (region-location-0): State: WAITING Blocked count: 13 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 513 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 57959 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (ForkJoinPool.commonPool-worker-2): State: WAITING Blocked count: 0 Waited count: 272 Waiting on java.util.concurrent.ForkJoinPool@c30d24e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 553 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 567 (region-location-1): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1153 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@737a5e04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1281 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1282 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1283 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1644 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on 
java.util.TaskQueue@1389038c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1871 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1872 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2014 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2210 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 596 Waiting on java.util.concurrent.ForkJoinPool@c30d24e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 2748 (ForkJoinPool.commonPool-worker-5): State: TIMED_WAITING Blocked count: 0 Waited count: 638 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6595 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6596 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6597 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11362 (AsyncFSWAL-1-hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData-prefix:72c8c7f04009,39673,1732753505055): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19211111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11374 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11375 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:35:03,958 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:35:06,494 DEBUG [master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): data stats (chunk size=2097152): current pool size=7, created chunk count=10, reused chunk count=25, reuseRatio=71.43% 2024-11-28T00:35:06,495 DEBUG [master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics {}] regionserver.ChunkCreator$MemStoreChunkPool$StatisticsThread(417): index stats (chunk size=209715): current pool size=0, created chunk count=0, reused chunk count=0, reuseRatio=0 2024-11-28T00:35:14,377 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-jobhistoryserver.properties,hadoop-metrics2.properties 2024-11-28T00:35:33,958 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;72c8c7f04009:39673 231 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) 
java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@61fb92fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 28 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 39 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 28 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16a9278f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 6514 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 66 Waiting on java.util.concurrent.CountDownLatch$Sync@31775e54 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11889 Waited count: 12728 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) 
app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 16 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@38563cd2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@671ce108 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1297 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:35791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3634 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@98fcdf1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43901): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 218 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 216 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 63857 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1783 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5434b52f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 
Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43901): State: TIMED_WAITING Blocked count: 68 Waited count: 2820 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43901): State: TIMED_WAITING Blocked count: 83 Waited count: 2826 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43901): State: TIMED_WAITING Blocked count: 69 Waited count: 2835 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43901): State: TIMED_WAITING Blocked count: 59 Waited count: 2823 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43901): State: TIMED_WAITING Blocked 
count: 66 Waited count: 2839 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 324 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 23 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 
(pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:46399}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1294 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 43091): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 3 Waited count: 405 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@457a5abb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1530 Waited count: 1752 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1065665691) connection to localhost/127.0.0.1:43901 from jenkins): State: TIMED_WAITING Blocked count: 1788 Waited count: 1789 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1421273556-122-acceptor-0@515ec047-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:39963}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 0 Waited count: 2422 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1294 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44599): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 348 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@246a88f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1528 Waited count: 1757 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 648 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 649 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) 
app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@5ede5822-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 11 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1293 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 46339): State: TIMED_WAITING Blocked count: 1 Waited count: 66 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 192 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43b7ff35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ae41503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 130 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@4ef3d9c0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 0 Waited count: 369 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2fc1b722 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@175a0b95[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1522 Waited count: 1770 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 194 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 647 
Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 647 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 655 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 655 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 
(refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dc838f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@5bccb5c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited 
count: 22 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64116): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 324 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 417 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37424b9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:64116):): State: WAITING Blocked count: 2 
Waited count: 507 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8c24c5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 537 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5491ba47 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@27e6e636 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 510 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:64116)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64c6304f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) 
Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ca968c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 99 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ceee830 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 102 Waited count: 402 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d97862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 139 
Waited count: 639 Waiting on java.util.concurrent.Semaphore$NonfairSync@13c86cbe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673): State: WAITING Blocked count: 104 Waited count: 9875 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5460d142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@32337978 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@624e4e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@307679d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f2983f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678c7d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 11 Waited count: 4 Stack: 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;72c8c7f04009:39673): State: TIMED_WAITING Blocked count: 12 Waited count: 4632 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1095/0x00007f1eb0f96a30.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@3f4880aa): State: TIMED_WAITING Blocked count: 0 Waited count: 215 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 6418 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 74 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 22 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36be2c44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 65 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 64135 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) 
app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e43c653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58d4e93a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4f2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3534750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 509 (region-location-0): State: WAITING Blocked count: 13 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 513 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 63960 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 542 (ForkJoinPool.commonPool-worker-2): State: TIMED_WAITING Blocked count: 0 Waited count: 273 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 553 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 567 (region-location-1): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1159 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@737a5e04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1281 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1282 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1283 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1644 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@1389038c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1871 
(region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1872 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2014 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2210 (ForkJoinPool.commonPool-worker-4): State: WAITING Blocked count: 0 Waited count: 596 Waiting on java.util.concurrent.ForkJoinPool@c30d24e Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6595 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6596 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6597 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11362 (AsyncFSWAL-1-hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData-prefix:72c8c7f04009,39673,1732753505055): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19211111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11375 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11379 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-28T00:36:03,959 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for 
details. 2024-11-28T00:36:33,959 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;72c8c7f04009:39673 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@61fb92fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 29 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 42 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) 
java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 31 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16a9278f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7114 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 72 Waiting on java.util.concurrent.CountDownLatch$Sync@61c549a4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) 
java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11889 Waited count: 12729 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 16 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@38563cd2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@671ce108 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1417 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: 
java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:35791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) 
app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3634 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@98fcdf1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43901): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 238 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 236 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) 
java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 69821 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1783 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5434b52f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43901): State: TIMED_WAITING Blocked count: 68 Waited count: 2881 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43901): State: TIMED_WAITING Blocked count: 83 Waited count: 2887 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43901): State: TIMED_WAITING Blocked count: 69 Waited count: 2896 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43901): State: TIMED_WAITING Blocked count: 59 Waited count: 2885 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43901): State: TIMED_WAITING Blocked count: 66 Waited count: 2901 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 354 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 25 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:46399}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 14 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1414 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 43091): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 3 Waited count: 425 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@457a5abb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1550 Waited count: 1792 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native 
Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: 
java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1065665691) connection to localhost/127.0.0.1:43901 from jenkins): State: TIMED_WAITING Blocked count: 1848 Waited count: 1849 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1421273556-122-acceptor-0@515ec047-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:39963}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) 
java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 0 Waited count: 2482 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1414 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44599): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 368 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@246a88f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1548 Waited count: 1797 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) 
app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 708 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 709 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@5ede5822-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 12 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2)): State: 
TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: TIMED_WAITING Blocked count: 0 Waited count: 1413 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 46339): State: TIMED_WAITING Blocked count: 1 Waited count: 72 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 192 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43b7ff35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ae41503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@4ef3d9c0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 0 Waited count: 389 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2fc1b722 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 205 (java.util.concurrent.ThreadPoolExecutor$Worker@175a0b95[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1542 Waited count: 1810 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 194 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 707 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 715 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 
215 (IPC Server handler 4 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 715 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dc838f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@5bccb5c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 24 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64116): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) 
app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 354 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 421 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37424b9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:64116):): State: WAITING Blocked count: 2 Waited count: 511 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8c24c5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 541 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5491ba47 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 5 Waited count: 101 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@27e6e636 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 550 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:64116)): State: RUNNABLE Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64c6304f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ca968c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on 
java.util.concurrent.Semaphore$NonfairSync@1ceee830 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 102 Waited count: 402 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d97862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 139 Waited count: 639 Waiting on java.util.concurrent.Semaphore$NonfairSync@13c86cbe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673): State: WAITING Blocked count: 104 Waited count: 9875 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5460d142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 4 Waiting on 
java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@32337978 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@624e4e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) 
app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@307679d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f2983f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678c7d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 11 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;72c8c7f04009:39673): State: TIMED_WAITING Blocked count: 12 Waited count: 4632 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:169) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1095/0x00007f1eb0f96a30.run(Unknown Source) app//org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) app//org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) app//org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) app//org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) app//org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) app//org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@3f4880aa): State: TIMED_WAITING Blocked count: 0 Waited count: 235 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7018 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 74 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 22 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36be2c44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 71 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 70137 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e43c653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58d4e93a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4f2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 
(regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3534750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 509 (region-location-0): State: WAITING Blocked count: 13 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 513 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 (RPCClient-NioEventLoopGroup-6-2): State: 
RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 69962 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 567 (region-location-1): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1165 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (zk-permission-watcher-pool-0): State: 
WAITING Blocked count: 70 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@737a5e04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1281 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1282 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1283 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1644 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@1389038c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1871 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1872 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2014 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2210 (ForkJoinPool.commonPool-worker-4): State: TIMED_WAITING Blocked count: 0 Waited count: 597 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Thread 6595 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6596 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6597 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11362 (AsyncFSWAL-1-hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData-prefix:72c8c7f04009,39673,1732753505055): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19211111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11375 (process reaper): State: TIMED_WAITING Blocked count: 0 Waited count: 6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferStack.transfer(SynchronousQueue.java:401) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1061) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11379 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 10 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) 2024-11-28T00:37:03,959 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:37:33,959 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-28T00:37:49,593 DEBUG [M:0;72c8c7f04009:39673 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732753969571Disabling compacts and flushes for region at 1732753969571Disabling writes for close at 1732753969592 (+21 ms)Obtaining lock to block concurrent updates at 1732753969592Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732753969592Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=1022927, getHeapSize=1226728, getOffHeapSize=0, getCellsCount=2686 at 1732753969592Failed flush master:store,,1.1595e783b53d99cd5eef43b6debb2682., putting online again at 1732754269593 (+300001 ms) 2024-11-28T00:37:49,593 WARN [M:0;72c8c7f04009:39673 {}] region.MasterRegion(134): Failed to close region org.apache.hadoop.hbase.regionserver.wal.WALSyncTimeoutIOException: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4609, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1033) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doSync(AbstractFSWAL.java:1940) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$sync$2(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:723) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.sync(AbstractFSWAL.java:713) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.doSyncOfUnflushedWALChanges(HRegion.java:2935) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalPrepareFlushCache(HRegion.java:2876) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2735) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2709) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.internalFlushcache(HRegion.java:2700) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion.doClose(HRegion.java:1862) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1672) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1627) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.close(HRegion.java:1610) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.closeRegion(MasterRegion.java:132) ~[classes/:?] at org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:205) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) ~[classes/:?] at org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) ~[classes/:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.exceptions.TimeoutIOException: Failed to get sync result after 300000 ms for txid=4609, WAL system stuck? at org.apache.hadoop.hbase.regionserver.wal.SyncFuture.get(SyncFuture.java:171) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.blockOnSync(AbstractFSWAL.java:1029) ~[classes/:?] ... 19 more 2024-11-28T00:37:49,595 WARN [Close-WAL-Writer-0 {}] wal.AsyncProtobufLogWriter(165): normal close failed, try recover java.lang.NullPointerException: Cannot invoke "org.apache.hbase.thirdparty.io.netty.buffer.ByteBuf.ensureWritable(int)" because "this.buf" is null at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.writeInt(FanOutOneBlockAsyncDFSOutput.java:391) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.lambda$writeWALTrailerAndMagic$3(AsyncProtobufLogWriter.java:247) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALMetadata(AsyncProtobufLogWriter.java:203) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.writeWALTrailerAndMagic(AsyncProtobufLogWriter.java:240) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractProtobufLogWriter.writeWALTrailer(AbstractProtobufLogWriter.java:252) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:162) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T00:37:49,597 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-28T00:37:49,597 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-28T00:37:49,597 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file /user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055/72c8c7f04009%2C39673%2C1732753505055.1732753506962 2024-11-28T00:37:49,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055/72c8c7f04009%2C39673%2C1732753505055.1732753506962 after 0ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T00:37:49,599 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed.
java.io.InterruptedIOException: Operation cancelled
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.checkIfCancelled(RecoverLeaseFSUtils.java:269) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:159) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.io.asyncfs.FanOutOneBlockAsyncDFSOutput.recoverAndClose(FanOutOneBlockAsyncDFSOutput.java:605) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AsyncProtobufLogWriter.close(AsyncProtobufLogWriter.java:166) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2041) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-28T00:37:49,599 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055/72c8c7f04009%2C39673%2C1732753505055.1732753506962
2024-11-28T00:37:49,599 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=0 on file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055/72c8c7f04009%2C39673%2C1732753505055.1732753506962 after 0ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Process Thread Dump: Automatic Stack Trace every 60 seconds waiting on M:0;72c8c7f04009:39673 230 active threads Thread 1 (main): State: TIMED_WAITING Blocked count: 1 Waited count: 4 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.junit.internal.runners.statements.FailOnTimeout.getResult(FailOnTimeout.java:167) app//org.junit.internal.runners.statements.FailOnTimeout.evaluate(FailOnTimeout.java:128) app//org.apache.hadoop.hbase.SystemExitRule$1.evaluate(SystemExitRule.java:39) app//org.junit.rules.RunRules.evaluate(RunRules.java:20) app//org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) app//org.junit.runners.ParentRunner.run(ParentRunner.java:413) app//org.apache.maven.surefire.junit4.JUnit4Provider.execute(JUnit4Provider.java:316) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeWithRerun(JUnit4Provider.java:240) app//org.apache.maven.surefire.junit4.JUnit4Provider.executeTestSet(JUnit4Provider.java:214) app//org.apache.maven.surefire.junit4.JUnit4Provider.invoke(JUnit4Provider.java:155) app//org.apache.maven.surefire.booter.ForkedBooter.runSuitesInProcess(ForkedBooter.java:385) app//org.apache.maven.surefire.booter.ForkedBooter.execute(ForkedBooter.java:162) app//org.apache.maven.surefire.booter.ForkedBooter.run(ForkedBooter.java:507) app//org.apache.maven.surefire.booter.ForkedBooter.main(ForkedBooter.java:495) Thread 2 (Reference Handler): State: RUNNABLE Blocked count: 4 Waited count: 0 Stack: java.base@17.0.11/java.lang.ref.Reference.waitForReferencePendingList(Native Method) java.base@17.0.11/java.lang.ref.Reference.processPendingReferences(Reference.java:253) java.base@17.0.11/java.lang.ref.Reference$ReferenceHandler.run(Reference.java:215) Thread 3 (Finalizer): State: WAITING Blocked count: 33 Waited count: 22 Waiting on java.lang.ref.ReferenceQueue$Lock@61fb92fa Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) java.base@17.0.11/java.lang.ref.Finalizer$FinalizerThread.run(Finalizer.java:172) Thread 4 (Signal Dispatcher): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 12 (Common-Cleaner): State: TIMED_WAITING Blocked count: 21 Waited count: 30 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/jdk.internal.ref.CleanerImpl.run(CleanerImpl.java:140) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) java.base@17.0.11/jdk.internal.misc.InnocuousThread.run(InnocuousThread.java:162) Thread 13 (Notification Thread): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: Thread 14 (pool-1-thread-1): State: RUNNABLE Blocked count: 0 Waited count: 45 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.poll(EPollPort.java:200) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:281) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 15 (pool-1-thread-2): State: WAITING Blocked count: 0 Waited count: 34 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@16a9278f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) java.base@17.0.11/sun.nio.ch.EPollPort$EventHandlerTask.run(EPollPort.java:275) java.base@17.0.11/sun.nio.ch.AsynchronousChannelGroupImpl$1.run(AsynchronousChannelGroupImpl.java:113) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 16 (surefire-forkedjvm-stream-flusher): State: TIMED_WAITING Blocked count: 0 Waited count: 7713 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 18 (surefire-forkedjvm-command-thread): State: WAITING Blocked count: 0 Waited count: 78 Waiting on java.util.concurrent.CountDownLatch$Sync@600366bc Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.CountDownLatch.await(CountDownLatch.java:230) java.base@17.0.11/sun.nio.ch.PendingFuture.get(PendingFuture.java:178) app//org.apache.maven.surefire.api.util.internal.Channels$2.read(Channels.java:127) java.base@17.0.11/java.io.BufferedInputStream.fill(BufferedInputStream.java:244) java.base@17.0.11/java.io.BufferedInputStream.read1(BufferedInputStream.java:284) java.base@17.0.11/java.io.BufferedInputStream.read(BufferedInputStream.java:343) 
app//org.apache.maven.surefire.api.util.internal.Channels$3.readImpl(Channels.java:169) app//org.apache.maven.surefire.api.util.internal.AbstractNoninterruptibleReadableChannel.read(AbstractNoninterruptibleReadableChannel.java:50) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:430) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.read(AbstractStreamDecoder.java:419) app//org.apache.maven.surefire.api.stream.AbstractStreamDecoder.readMessageType(AbstractStreamDecoder.java:116) app//org.apache.maven.surefire.booter.stream.CommandDecoder.decode(CommandDecoder.java:77) app//org.apache.maven.surefire.booter.spi.CommandChannelDecoder.decode(CommandChannelDecoder.java:60) app//org.apache.maven.surefire.booter.CommandReader$CommandRunnable.run(CommandReader.java:290) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 22 (Time-limited test): State: RUNNABLE Blocked count: 11889 Waited count: 12730 Stack: java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo1(Native Method) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:197) java.management@17.0.11/sun.management.ThreadImpl.getThreadInfo(ThreadImpl.java:154) app//org.apache.hadoop.hbase.util.ReflectionUtils.printThreadInfo(ReflectionUtils.java:181) app//org.apache.hadoop.hbase.util.Threads.printThreadInfo(Threads.java:186) app//org.apache.hadoop.hbase.util.Threads.threadDumpingIsAlive(Threads.java:113) app//org.apache.hadoop.hbase.LocalHBaseCluster.join(LocalHBaseCluster.java:396) app//org.apache.hadoop.hbase.SingleProcessHBaseCluster.waitUntilShutDown(SingleProcessHBaseCluster.java:886) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1038) app//org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) app//org.apache.hadoop.hbase.snapshot.TestExportSnapshot.tearDownAfterClass(TestExportSnapshot.java:123) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) java.base@17.0.11/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) java.base@17.0.11/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) java.base@17.0.11/java.lang.reflect.Method.invoke(Method.java:568) app//org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) app//org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) app//org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) app//org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) app//org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) Thread 23 (org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner): State: WAITING Blocked count: 16 Waited count: 16 Waiting on java.lang.ref.ReferenceQueue$Lock@38563cd2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 25 (SSL Certificates Store Monitor): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.TaskQueue@671ce108 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) 
java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 34 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@58cc40a9): State: TIMED_WAITING Blocked count: 0 Waited count: 1537 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 35 (GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Thread 36 (pool-6-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 37 (qtp1861387285-37): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 38 (qtp1861387285-38): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 39 (qtp1861387285-39): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 40 (qtp1861387285-40): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) 
app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 41 (qtp1861387285-41-acceptor-0@d3b961d-ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:35791}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 42 (qtp1861387285-42): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 43 (qtp1861387285-43): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 44 (qtp1861387285-44): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 45 (Session-HouseKeeper-61abf97f-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 46 (pool-7-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 53 (FSEditLogAsync): State: WAITING Blocked count: 45 Waited count: 3634 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@98fcdf1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.dequeueEdit(FSEditLogAsync.java:241) app//org.apache.hadoop.hdfs.server.namenode.FSEditLogAsync.run(FSEditLogAsync.java:250) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 55 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 1 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 56 (IPC Server idle connection scanner for port 43901): State: TIMED_WAITING Blocked count: 1 Waited 
count: 78 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 58 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 61 (org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor@647b647f): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.PendingReconstructionBlocks$PendingReconstructionMonitor.run(PendingReconstructionBlocks.java:267) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 62 (DatanodeAdminMonitor-0): State: TIMED_WAITING Blocked count: 0 Waited count: 258 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 49 (org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor@86c698e): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.HeartbeatManager$Monitor.run(HeartbeatManager.java:563) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 47 (RedundancyMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 256 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) java.base@17.0.11/java.lang.Thread.sleep(Thread.java:344) java.base@17.0.11/java.util.concurrent.TimeUnit.sleep(TimeUnit.java:446) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$RedundancyMonitor.run(BlockManager.java:5352) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 48 (MarkedDeleteBlockScrubberThread): State: TIMED_WAITING Blocked count: 0 Waited count: 75785 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$MarkedDeleteBlockScrubber.run(BlockManager.java:5326) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 51 (Block report processor): State: WAITING Blocked count: 1 Waited count: 1783 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5434b52f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ArrayBlockingQueue.take(ArrayBlockingQueue.java:420) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.processQueue(BlockManager.java:5627) app//org.apache.hadoop.hdfs.server.blockmanagement.BlockManager$BlockReportProcessingThread.run(BlockManager.java:5614) Thread 57 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 54 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 64 (IPC Server handler 0 on default port 43901): State: TIMED_WAITING Blocked count: 68 Waited count: 2943 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 65 (IPC Server handler 1 on default port 43901): State: TIMED_WAITING Blocked count: 83 Waited count: 2948 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) 
app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 66 (IPC Server handler 2 on default port 43901): State: TIMED_WAITING Blocked count: 69 Waited count: 2957 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 67 (IPC Server handler 3 on default port 43901): State: TIMED_WAITING Blocked count: 59 Waited count: 2946 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 68 (IPC Server handler 4 on default port 43901): State: TIMED_WAITING Blocked count: 66 Waited count: 2962 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 69 (pool-12-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 71 (org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor@6da51f12): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.LeaseManager$Monitor.run(LeaseManager.java:537) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 72 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor@5e705095): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeResourceMonitor.run(FSNamesystem.java:4550) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 73 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller@26af3662): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$NameNodeEditLogRoller.run(FSNamesystem.java:4592) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 74 (org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber@43d016d): State: TIMED_WAITING Blocked count: 0 Waited count: 4 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.server.namenode.FSNamesystem$LazyPersistFileScrubber.run(FSNamesystem.java:4689) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 75 (CacheReplicationMonitor(1367253335)): State: TIMED_WAITING Blocked count: 0 Waited count: 27 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1759) app//org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor.run(CacheReplicationMonitor.java:186) Thread 86 (pool-18-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 87 (qtp795976251-87): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) 
app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 88 (qtp795976251-88-acceptor-0@5c7448af-ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:46399}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 89 (qtp795976251-89): State: TIMED_WAITING Blocked count: 0 Waited count: 17 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 90 (qtp795976251-90): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 91 (Session-HouseKeeper-2ee2162-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Thread 92 (nioEventLoopGroup-2-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 93 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@4e5a1cf1): State: TIMED_WAITING Blocked count: 0 Waited count: 1534 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 95 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 96 (IPC Server idle connection scanner for port 43091): State: TIMED_WAITING Blocked count: 1 Waited count: 78 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 98 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 101 (Command processor): State: WAITING Blocked count: 3 Waited count: 445 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@457a5abb Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 102 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1570 Waited count: 1832 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 103 (pool-20-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 85 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ae688ac): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 97 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) 
app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 94 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 104 (IPC Server handler 0 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 105 (IPC Server handler 1 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 106 (IPC Server handler 2 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 107 (IPC Server handler 3 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 108 (IPC Server handler 4 on default port 43091): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 120 (pool-26-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 121 (qtp1421273556-121): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 115 (IPC Client (1065665691) connection to localhost/127.0.0.1:43901 from jenkins): State: TIMED_WAITING Blocked count: 1908 Waited count: 1909 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Thread 122 (qtp1421273556-122-acceptor-0@515ec047-ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:39963}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) 
app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 118 (IPC Parameter Sending Thread for localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 0 Waited count: 2542 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 123 (qtp1421273556-123): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 124 (qtp1421273556-124): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 125 (Session-HouseKeeper-13de1d7e-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 126 (nioEventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 127 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@65d9109d): State: TIMED_WAITING Blocked count: 0 Waited count: 1534 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 129 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 130 (IPC Server idle connection scanner for port 44599): State: TIMED_WAITING Blocked count: 1 Waited count: 78 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 132 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 135 (Command processor): State: WAITING Blocked count: 1 Waited count: 388 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@246a88f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 137 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1568 Waited count: 1837 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 138 (pool-29-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 119 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@660ae03b): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 131 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 128 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 139 (IPC Server handler 0 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 768 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 140 (IPC Server handler 1 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 141 (IPC Server handler 2 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 142 (IPC Server handler 3 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 769 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 143 (IPC Server handler 4 on default port 44599): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 155 (pool-36-thread-1): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 156 (qtp334392358-156): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.eclipse.jetty.io.ManagedSelector.nioSelect(ManagedSelector.java:183) app//org.eclipse.jetty.io.ManagedSelector.select(ManagedSelector.java:190) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.select(ManagedSelector.java:606) app//org.eclipse.jetty.io.ManagedSelector$SelectorProducer.produce(ManagedSelector.java:543) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produceTask(EatWhatYouKill.java:362) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:186) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.tryProduce(EatWhatYouKill.java:173) app//org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:137) app//org.eclipse.jetty.io.ManagedSelector$$Lambda$254/0x00007f1eb042d2a8.run(Unknown Source) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 157 (qtp334392358-157-acceptor-0@5ede5822-ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:37047}): State: RUNNABLE Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) app//org.eclipse.jetty.server.ServerConnector.accept(ServerConnector.java:388) app//org.eclipse.jetty.server.AbstractConnector$Acceptor.run(AbstractConnector.java:704) app//org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:883) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1034) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 158 (qtp334392358-158): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 159 (qtp334392358-159): State: TIMED_WAITING Blocked count: 0 Waited count: 13 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) app//org.eclipse.jetty.util.BlockingArrayQueue.poll(BlockingArrayQueue.java:382) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.idleJobPoll(QueuedThreadPool.java:974) app//org.eclipse.jetty.util.thread.QueuedThreadPool$Runner.run(QueuedThreadPool.java:1018) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 161 (Session-HouseKeeper-24bad932-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 162 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 163 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3)): State: TIMED_WAITING Blocked count: 2 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 164 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 165 
(VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 172 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 173 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 2 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 176 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 177 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 188 (nioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 191 (org.apache.hadoop.util.JvmPauseMonitor$Monitor@37aed54c): State: 
TIMED_WAITING Blocked count: 0 Waited count: 1533 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.JvmPauseMonitor$Monitor.run(JvmPauseMonitor.java:189) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 195 (Socket Reader #1 for port 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener$Reader.doRunLoop(Server.java:1497) app//org.apache.hadoop.ipc.Server$Listener$Reader.run(Server.java:1476) Thread 196 (IPC Server idle connection scanner for port 46339): State: TIMED_WAITING Blocked count: 1 Waited count: 78 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 192 (pool-15-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@43b7ff35 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 193 (pool-23-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@4ae41503 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 198 (Hadoop-Metrics-Updater-0): State: TIMED_WAITING Blocked count: 0 Waited count: 154 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 202 (java.util.concurrent.ThreadPoolExecutor$Worker@4ef3d9c0[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 208 (Command processor): State: WAITING Blocked count: 0 Waited count: 409 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2fc1b722 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1411) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) Thread 205 
(java.util.concurrent.ThreadPoolExecutor$Worker@175a0b95[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 209 (BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901): State: TIMED_WAITING Blocked count: 1562 Waited count: 1850 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.IncrementalBlockReportManager.waitTillNextIBR(IncrementalBlockReportManager.java:158) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.offerService(BPServiceActor.java:771) app//org.apache.hadoop.hdfs.server.datanode.BPServiceActor.run(BPServiceActor.java:914) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 210 (pool-46-thread-1): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 154 (org.apache.hadoop.hdfs.server.datanode.DataXceiverServer@4ecc9509): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.Net.accept(Native Method) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.implAccept(ServerSocketChannelImpl.java:425) java.base@17.0.11/sun.nio.ch.ServerSocketChannelImpl.accept(ServerSocketChannelImpl.java:391) java.base@17.0.11/sun.nio.ch.ServerSocketAdaptor.accept(ServerSocketAdaptor.java:126) app//org.apache.hadoop.hdfs.net.TcpPeerServer.accept(TcpPeerServer.java:85) app//org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.run(DataXceiverServer.java:242) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 197 (IPC Server Responder): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.hadoop.ipc.Server$Responder.doRunLoop(Server.java:1733) app//org.apache.hadoop.ipc.Server$Responder.run(Server.java:1716) Thread 194 (IPC Server listener on 0): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hadoop.ipc.Server$Listener.run(Server.java:1559) Thread 211 (IPC Server handler 0 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 212 (IPC Server handler 1 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 213 (IPC Server handler 2 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 767 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 214 (IPC Server handler 3 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 776 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 215 (IPC Server handler 4 on default port 46339): State: TIMED_WAITING Blocked count: 0 Waited count: 776 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.poll(LinkedBlockingQueue.java:460) app//org.apache.hadoop.ipc.CallQueueManager.take(CallQueueManager.java:370) app//org.apache.hadoop.ipc.Server$Handler.run(Server.java:3165) Thread 218 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 219 (VolumeScannerThread(/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6)): State: TIMED_WAITING Blocked count: 1 Waited count: 2 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hdfs.server.datanode.VolumeScanner.run(VolumeScanner.java:656) Thread 223 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 225 (refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6/current/BP-1095226328-172.17.0.2-1732753500430): State: TIMED_WAITING Blocked count: 3 Waited count: 3 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.fs.CachingGetSpaceUsed$RefreshThread.run(CachingGetSpaceUsed.java:225) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 229 (pool-33-thread-1): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1dc838f8 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 232 (java.util.concurrent.ThreadPoolExecutor$Worker@5bccb5c3[State = -1, empty queue]): State: TIMED_WAITING Blocked count: 0 Waited count: 1 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 237 (FsDatasetAsyncDiskServiceFixer): State: TIMED_WAITING Blocked count: 0 Waited count: 26 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer.run(HBaseTestingUtil.java:576) Thread 240 (NIOServerCxnFactory.SelectorThread-1): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 239 (NIOServerCxnFactory.SelectorThread-0): State: RUNNABLE Blocked count: 6 Waited count: 1 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.select(NIOServerCnxnFactory.java:403) app//org.apache.zookeeper.server.NIOServerCnxnFactory$SelectorThread.run(NIOServerCnxnFactory.java:368) Thread 241 (NIOServerCxnFactory.AcceptThread:localhost/127.0.0.1:64116): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.select(NIOServerCnxnFactory.java:205) app//org.apache.zookeeper.server.NIOServerCnxnFactory$AcceptThread.run(NIOServerCnxnFactory.java:181) Thread 238 (ConnnectionExpirer): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.NIOServerCnxnFactory$ConnectionExpirerThread.run(NIOServerCnxnFactory.java:554) Thread 242 (SessionTracker): State: TIMED_WAITING Blocked count: 0 Waited count: 384 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Thread 243 (SyncThread:0): State: WAITING Blocked count: 16 Waited count: 426 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@37424b9f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.SyncRequestProcessor.run(SyncRequestProcessor.java:170) Thread 244 (ProcessThread(sid:0 cport:64116):): State: WAITING Blocked count: 2 Waited count: 516 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@8c24c5f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.PrepRequestProcessor.run(PrepRequestProcessor.java:142) Thread 245 (RequestThrottler): State: WAITING Blocked count: 0 Waited count: 546 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5491ba47 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.server.RequestThrottler.run(RequestThrottler.java:147) Thread 246 (NIOWorkerThread-1): State: WAITING Blocked count: 5 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 257 (weak-ref-cleaner-strictcontextstorage): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.lang.ref.ReferenceQueue$Lock@27e6e636 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 258 (HBase-Metrics2-1): State: TIMED_WAITING Blocked count: 0 Waited count: 590 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 259 (HMaster-EventLoopGroup-1-1): State: RUNNABLE Blocked count: 24 Waited count: 2 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 260 (Time-limited test-SendThread(127.0.0.1:64116)): State: RUNNABLE 
Blocked count: 9 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:141) app//org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:332) app//org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1289) Thread 261 (Time-limited test-EventThread): State: WAITING Blocked count: 5 Waited count: 62 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@64c6304f Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:550) Thread 262 (NIOWorkerThread-2): State: WAITING Blocked count: 6 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 263 (NIOWorkerThread-3): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 264 (NIOWorkerThread-4): State: WAITING Blocked count: 2 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 265 (zk-event-processor-pool-0): State: WAITING Blocked count: 20 Waited count: 77 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@ca968c5 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 266 (NIOWorkerThread-5): State: WAITING Blocked count: 3 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 267 (NIOWorkerThread-6): State: WAITING Blocked count: 1 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 268 (NIOWorkerThread-7): State: WAITING Blocked count: 3 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 269 (NIOWorkerThread-8): State: WAITING Blocked count: 1 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 270 (NIOWorkerThread-9): State: WAITING Blocked count: 3 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 271 (NIOWorkerThread-10): State: WAITING Blocked count: 0 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 272 (NIOWorkerThread-11): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 273 (NIOWorkerThread-12): State: WAITING Blocked count: 2 Waited count: 101 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 274 (NIOWorkerThread-13): State: WAITING Blocked count: 2 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 275 (NIOWorkerThread-14): State: WAITING Blocked count: 5 Waited count: 100 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 276 (NIOWorkerThread-15): State: WAITING Blocked count: 1 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 277 (NIOWorkerThread-16): State: WAITING Blocked count: 3 Waited count: 102 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@2dd5b41 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 279 (RpcServer.default.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@1ceee830 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 280 (RpcServer.default.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 102 Waited count: 402 Waiting on java.util.concurrent.Semaphore$NonfairSync@46d97862 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 281 (RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39673): State: WAITING Blocked count: 139 Waited count: 639 Waiting on java.util.concurrent.Semaphore$NonfairSync@13c86cbe Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 282 (RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39673): State: WAITING Blocked count: 104 Waited count: 9875 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@5460d142 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 283 (RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 4 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 284 (RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3f240c96 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.ipc.RpcHandler.getCallRunner(RpcHandler.java:68) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 285 (RpcServer.replication.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@32337978 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 286 (RpcServer.replication.FPBQ.Fifo.handler=1,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@624e4e1c Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 287 (RpcServer.replication.FPBQ.Fifo.handler=2,queue=0,port=39673): 
State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.Semaphore$NonfairSync@307679d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 288 (RpcServer.metaPriority.FPBQ.Fifo.handler=0,queue=0,port=39673): State: WAITING Blocked count: 0 Waited count: 3 Waiting on java.util.concurrent.Semaphore$NonfairSync@2f2983f2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:211) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquire(AbstractQueuedSynchronizer.java:715) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer.acquireSharedInterruptibly(AbstractQueuedSynchronizer.java:1047) java.base@17.0.11/java.util.concurrent.Semaphore.acquire(Semaphore.java:318) app//org.apache.hadoop.hbase.ipc.FastPathRpcHandler.getCallRunner(FastPathRpcHandler.java:55) app//org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) Thread 291 (Time-limited test.named-queue-events-pool-0): State: WAITING Blocked count: 0 Waited count: 1 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@678c7d6 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 292 (MiniHBaseClusterRegionServer-EventLoopGroup-3-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 312 (MiniHBaseClusterRegionServer-EventLoopGroup-4-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 332 (MiniHBaseClusterRegionServer-EventLoopGroup-5-1): State: RUNNABLE Blocked count: 11 Waited count: 4 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 289 (M:0;72c8c7f04009:39673): State: TIMED_WAITING Blocked count: 12 Waited count: 4633 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.FutureTask.awaitDone(FutureTask.java:444) java.base@17.0.11/java.util.concurrent.FutureTask.get(FutureTask.java:203) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.shutdown(AbstractFSWAL.java:1195) app//org.apache.hadoop.hbase.wal.AbstractFSWALProvider.shutdown0(AbstractFSWALProvider.java:162) app//org.apache.hadoop.hbase.wal.AbstractWALProvider$$Lambda$1413/0x00007f1eb1239610.run(Unknown Source) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.cleanup(AbstractWALProvider.java:287) app//org.apache.hadoop.hbase.wal.AbstractWALProvider.shutdown(AbstractWALProvider.java:299) app//org.apache.hadoop.hbase.wal.WALFactory.shutdown(WALFactory.java:341) app//org.apache.hadoop.hbase.master.region.MasterRegion.shutdownWAL(MasterRegion.java:140) app//org.apache.hadoop.hbase.master.region.MasterRegion.close(MasterRegion.java:206) app//org.apache.hadoop.hbase.master.HMaster.stopServiceThreads(HMaster.java:1819) 
app//org.apache.hadoop.hbase.master.HMaster.run(HMaster.java:631) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 353 (Monitor thread for TaskMonitor): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 355 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 357 (master/72c8c7f04009:0:becomeActiveMaster-MemStoreChunkPool Statistics): State: TIMED_WAITING Blocked count: 0 Waited count: 3 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 359 (org.apache.hadoop.hdfs.PeerCache@3f4880aa): State: TIMED_WAITING Blocked count: 0 Waited count: 255 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 377 (master:store-WAL-Roller): State: TIMED_WAITING Blocked count: 0 Waited count: 7617 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.hbase.wal.AbstractWALRoller.run(AbstractWALRoller.java:180) Thread 392 (MiniHBaseClusterRegionServer-EventLoopGroup-5-2): State: RUNNABLE Blocked count: 74 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 393 (MiniHBaseClusterRegionServer-EventLoopGroup-5-3): State: RUNNABLE Blocked count: 22 Waited count: 3 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 405 (Idle-Rpc-Conn-Sweeper-pool-0): State: WAITING Blocked count: 0 Waited count: 172 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@36be2c44 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 416 (SnapshotHandlerChoreCleaner): State: TIMED_WAITING Blocked count: 0 Waited count: 77 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) 
java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 404 (RpcClient-timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 76138 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 425 (HMaster-EventLoopGroup-1-2): State: RUNNABLE Blocked count: 18 Waited count: 1 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 426 (HMaster-EventLoopGroup-1-3): State: RUNNABLE Blocked count: 21 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 447 (RegionServerTracker-0): State: WAITING Blocked count: 8 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@6e43c653 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 471 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 17 Waited count: 35 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@58d4e93a Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 468 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 16 Waited count: 33 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1f4f2268 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 469 (regionserver/72c8c7f04009:0.procedureResultReporter): State: WAITING Blocked count: 15 Waited count: 29 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@3534750 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Thread 509 (region-location-0): State: WAITING Blocked count: 13 Waited count: 20 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 513 (RPCClient-NioEventLoopGroup-6-1): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 514 (RPCClient-NioEventLoopGroup-6-2): State: RUNNABLE Blocked count: 1 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 403 (Async-Client-Retry-Timer-pool-0): State: TIMED_WAITING Blocked count: 0 Waited count: 75963 Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 521 (RPCClient-NioEventLoopGroup-6-3): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 553 (MiniHBaseClusterRegionServer-EventLoopGroup-4-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 567 (region-location-1): State: WAITING Blocked count: 4 Waited count: 11 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 568 (region-location-2): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 976 (MutableQuantiles-0): State: TIMED_WAITING Blocked count: 0 Waited count: 1171 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1039 (RPCClient-NioEventLoopGroup-6-4): State: RUNNABLE Blocked count: 7 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1069 (MiniHBaseClusterRegionServer-EventLoopGroup-3-2): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1080 (MiniHBaseClusterRegionServer-EventLoopGroup-4-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1081 (zk-permission-watcher-pool-0): State: WAITING Blocked count: 70 Waited count: 117 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@737a5e04 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1085 (RPCClient-NioEventLoopGroup-6-5): State: RUNNABLE Blocked count: 3 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1086 (MiniHBaseClusterRegionServer-EventLoopGroup-3-3): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1229 (RPCClient-NioEventLoopGroup-6-6): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1230 (RPCClient-NioEventLoopGroup-6-7): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1231 (RPCClient-NioEventLoopGroup-6-8): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1281 (RPCClient-NioEventLoopGroup-6-9): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1282 (RPCClient-NioEventLoopGroup-6-10): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1283 (RPCClient-NioEventLoopGroup-6-11): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1285 (RPCClient-NioEventLoopGroup-6-12): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1644 (Container metrics unregistration): State: WAITING Blocked count: 11 Waited count: 70 Waiting on java.util.TaskQueue@1389038c Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 1871 (region-location-3): State: WAITING Blocked count: 3 Waited count: 7 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 1872 (region-location-4): State: WAITING Blocked count: 3 Waited count: 8 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@73d26837 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 2014 (RPCClient-NioEventLoopGroup-6-13): State: RUNNABLE Blocked count: 5 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6595 (RPCClient-NioEventLoopGroup-6-14): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6596 (RPCClient-NioEventLoopGroup-6-15): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 6597 (RPCClient-NioEventLoopGroup-6-16): State: RUNNABLE Blocked count: 0 Waited count: 0 Stack: java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11362 (AsyncFSWAL-1-hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData-prefix:72c8c7f04009,39673,1732753505055): State: WAITING Blocked count: 0 Waited count: 2 Waiting on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@19211111 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11379 (Timer for 'JobHistoryServer' metrics system): State: TIMED_WAITING Blocked count: 0 Waited count: 16 Stack: java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Thread 11383 (WAL-Shutdown-0): State: TIMED_WAITING Blocked count: 0 Waited count: 2 Stack: java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.awaitTermination(ThreadPoolExecutor.java:1464) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.doShutdown(AbstractFSWAL.java:2117) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1179) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$2.call(AbstractFSWAL.java:1174) java.base@17.0.11/java.util.concurrent.FutureTask.run(FutureTask.java:264) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Thread 11384 (Close-WAL-Writer-0): State: TIMED_WAITING Blocked count: 1 Waited count: 2 
Stack: java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:166) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$1403/0x00007f1eb1231e50.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 2024-11-28T00:37:53,600 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=1 on file=hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055/72c8c7f04009%2C39673%2C1732753505055.1732753506962 after 4001ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-28T00:37:54,595 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.async.wait.on.shutdown.seconds" 2024-11-28T00:37:54,596 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
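The ERROR entry above names a tunable, "hbase.wal.async.wait.on.shutdown.seconds", as the way to allow more time for the async WAL writer to close. Below is a minimal, illustrative sketch of raising it programmatically; the property name is taken verbatim from the log message, while the class name and the value 30 are hypothetical and not part of this test run.

```java
// Hedged sketch: one way to raise the WAL shutdown wait named in the ERROR above.
// The property name comes from the log; this run waited 5 seconds before giving up.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalShutdownWait {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.wal.async.wait.on.shutdown.seconds", 30); // illustrative value
    System.out.println(conf.getInt("hbase.wal.async.wait.on.shutdown.seconds", 5));
  }
}
```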
2024-11-28T00:37:54,596 INFO [M:0;72c8c7f04009:39673 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-28T00:37:54,596 INFO [M:0;72c8c7f04009:39673 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39673 2024-11-28T00:37:54,596 INFO [M:0;72c8c7f04009:39673 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-28T00:37:54,602 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:43901/user/jenkins/test-data/80fb5f5c-a5cc-3196-f9dc-1bbec52a035a/MasterData/WALs/72c8c7f04009,39673,1732753505055/72c8c7f04009%2C39673%2C1732753505055.1732753506962 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) ~[?:?] at jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
12 more 2024-11-28T00:37:54,698 INFO [M:0;72c8c7f04009:39673 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-28T00:37:54,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:37:54,698 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39673-0x1003b83f01b0000, quorum=127.0.0.1:64116, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-28T00:37:54,702 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@367f3488{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:37:54,702 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1c0d7560{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:37:54,703 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:37:54,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59eae107{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-28T00:37:54,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6268d40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,STOPPED} 2024-11-28T00:37:54,705 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
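The lease-recovery WARN entries above fail because the underlying DFSClient has already been closed while Close-WAL-Writer-0 is still retrying, so both reflective calls surface IOException("Filesystem closed"). For context, here is a self-contained sketch of the same two HDFS calls invoked directly rather than via reflection; the path and setup are hypothetical and not taken from the test.

```java
// Illustrative sketch of the two HDFS calls that RecoverLeaseFSUtils reaches via reflection
// in the stack traces above. Once the DFSClient is closed, both throw "Filesystem closed".
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path("hdfs://localhost:43901/tmp/example-wal"); // hypothetical WAL path
    FileSystem fs = wal.getFileSystem(conf);
    if (fs instanceof DistributedFileSystem) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      boolean closed = dfs.recoverLease(wal);  // ask the NameNode to start lease recovery
      if (!closed) {
        closed = dfs.isFileClosed(wal);        // poll whether recovery has completed
      }
      System.out.println("file closed: " + closed);
    }
    fs.close();
  }
}
```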
2024-11-28T00:37:54,705 WARN [BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-28T00:37:54,705 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-28T00:37:54,705 WARN [BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1095226328-172.17.0.2-1732753500430 (Datanode Uuid 90ba7e90-1881-4c45-a690-64f6ef41cebd) service to localhost/127.0.0.1:43901 2024-11-28T00:37:54,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data5/current/BP-1095226328-172.17.0.2-1732753500430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T00:37:54,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data6/current/BP-1095226328-172.17.0.2-1732753500430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T00:37:54,707 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-28T00:37:54,709 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6b9a68a9{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:37:54,710 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@56952abb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:37:54,710 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:37:54,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@76645274{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-28T00:37:54,710 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16f22913{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,STOPPED} 2024-11-28T00:37:54,711 WARN [BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-28T00:37:54,712 WARN [BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1095226328-172.17.0.2-1732753500430 (Datanode Uuid 79b08128-f8b3-4b4e-b480-34b9004049c2) service to localhost/127.0.0.1:43901 2024-11-28T00:37:54,711 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-28T00:37:54,712 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-28T00:37:54,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data3/current/BP-1095226328-172.17.0.2-1732753500430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T00:37:54,712 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data4/current/BP-1095226328-172.17.0.2-1732753500430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T00:37:54,713 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-28T00:37:54,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@64986ec5{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-28T00:37:54,715 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7147894f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:37:54,715 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:37:54,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@726dcf3b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-28T00:37:54,715 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ab3224e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,STOPPED} 2024-11-28T00:37:54,716 WARN [BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-28T00:37:54,716 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-28T00:37:54,716 WARN [BP-1095226328-172.17.0.2-1732753500430 heartbeating to localhost/127.0.0.1:43901 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1095226328-172.17.0.2-1732753500430 (Datanode Uuid 8fbe14a4-867b-4ed5-8ffb-59d9026469be) service to localhost/127.0.0.1:43901 2024-11-28T00:37:54,716 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-28T00:37:54,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data1/current/BP-1095226328-172.17.0.2-1732753500430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T00:37:54,717 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/cluster_a44ab9f3-d40a-1fd4-17f7-c7fcb088b4e1/data/data2/current/BP-1095226328-172.17.0.2-1732753500430 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-28T00:37:54,717 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-28T00:37:54,724 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5996a1b{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-28T00:37:54,725 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@649f03da{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-28T00:37:54,725 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-28T00:37:54,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@af8acfe{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-28T00:37:54,725 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@186b7ee9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-mapreduce/target/test-data/8bc497c7-3ddb-aa73-228f-6ac165ad76c1/hadoop.log.dir/,STOPPED} 2024-11-28T00:37:54,737 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-28T00:37:54,945 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down
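The final INFO entries come from the test utility tearing down the mini cluster. As orientation only, here is a hedged sketch of the start/stop pattern that ends with exactly this "Minicluster is down" message; the class name is taken from the log line, but the lifecycle shown is an assumption about the surrounding test harness, not code from this run.

```java
// Illustrative mini-cluster lifecycle behind the teardown messages above.
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterLifecycle {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    util.startMiniCluster();      // brings up MiniZK, MiniDFS and an HBase master + region server
    try {
      // test body would run against the mini cluster here
    } finally {
      util.shutdownMiniCluster(); // tears everything down and logs "Minicluster is down"
    }
  }
}
```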